Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author     Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 6 May 2019 21:50:46 +0000 (14:50 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 6 May 2019 21:50:46 +0000 (14:50 -0700)
Pull timer updates from Ingo Molnar:
 "This cycle had the following changes:

   - Timer tracing improvements (Anna-Maria Gleixner)

   - Continued tasklet reduction work: remove tasklet_hrtimer and
     convert its users to softirq-based hrtimers (Thomas Gleixner;
     a conversion sketch follows the shortlog below)

   - Fix CPU hotplug remove race in the tick-broadcast mask handling
     code (Thomas Gleixner)

   - Force an upper bound for setting CLOCK_REALTIME, to fix ABI
     inconsistencies in handling values close to the supported maximum
     and the vagueness about when uptime-related wraparound might
     occur. The consistent maximum is now the year 2232 across all
     relevant ABIs and APIs. (Thomas Gleixner; see the example after
     this list)

   - Various cleanups and smaller fixes"
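
As a rough illustration of the new CLOCK_REALTIME upper bound, here is a
minimal userspace sketch; the value used is an arbitrary assumption far past
the year-2232 cap, and the exact cutoff is not taken from the patches. After
this series, such a call should be rejected with EINVAL rather than setting a
time whose wraparound behavior was previously ill-defined:

/* Hypothetical demo: setting CLOCK_REALTIME past the new cap.
 * Assumes a 64-bit time_t and CAP_SYS_TIME (otherwise EPERM).
 */
#include <errno.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts = {
		.tv_sec  = (time_t)1 << 62,	/* far beyond the ~year-2232 limit */
		.tv_nsec = 0,
	};

	if (clock_settime(CLOCK_REALTIME, &ts) != 0)
		perror("clock_settime");	/* expected after this series: EINVAL */
	return 0;
}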

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tick: Fix typos in comments
  tick/broadcast: Fix warning about undefined tick_broadcast_oneshot_offline()
  timekeeping: Force upper bound for setting CLOCK_REALTIME
  timer/trace: Improve timer tracing
  timer/trace: Replace deprecated vsprintf pointer extension %pf by %ps
  timer: Move trace point to get proper index
  tick/sched: Update tick_sched struct documentation
  tick: Remove outgoing CPU from broadcast masks
  timekeeping: Consistently use unsigned int for seqcount snapshot
  softirq: Remove tasklet_hrtimer
  xfrm: Replace hrtimer tasklet with softirq hrtimer
  mac80211_hwsim: Replace hrtimer tasklet with softirq hrtimer
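
The tasklet_hrtimer removal works by letting callbacks run directly from an
hrtimer that expires in softirq context, as done in the xfrm and
mac80211_hwsim conversions listed above. A minimal sketch of that pattern
follows; my_timer and my_timer_fn are hypothetical names, not identifiers
from the actual patches:

/* Conversion sketch: softirq-expiring hrtimer replacing a tasklet_hrtimer. */
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer my_timer;

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	/* Runs in softirq context, as the old tasklet callback did. */
	return HRTIMER_NORESTART;
}

static void my_timer_start(void)
{
	/* The *_SOFT hrtimer modes expire the timer in softirq context,
	 * which is what tasklet_hrtimer used to provide indirectly.
	 */
	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
	my_timer.function = my_timer_fn;
	hrtimer_start(&my_timer, ms_to_ktime(10), HRTIMER_MODE_REL_SOFT);
}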

2125 files changed:
.clang-format
.mailmap
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/RCU/Design/Data-Structures/Data-Structures.html
Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
Documentation/RCU/NMI-RCU.txt
Documentation/RCU/UP.txt
Documentation/RCU/checklist.txt
Documentation/RCU/rcu.txt
Documentation/RCU/rcu_dereference.txt
Documentation/RCU/rcubarrier.txt
Documentation/RCU/whatisRCU.txt
Documentation/accounting/psi.txt
Documentation/admin-guide/kernel-parameters.txt
Documentation/atomic_t.txt
Documentation/bpf/btf.rst
Documentation/core-api/cachetlb.rst
Documentation/devicetree/bindings/arm/cpus.yaml
Documentation/devicetree/bindings/hwmon/adc128d818.txt
Documentation/devicetree/bindings/i2c/i2c-iop3xx.txt [moved from Documentation/devicetree/bindings/i2c/i2c-xscale.txt with 100% similarity]
Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt [moved from Documentation/devicetree/bindings/i2c/i2c-mtk.txt with 100% similarity]
Documentation/devicetree/bindings/i2c/i2c-stu300.txt [moved from Documentation/devicetree/bindings/i2c/i2c-st-ddci2c.txt with 100% similarity]
Documentation/devicetree/bindings/i2c/i2c-sun6i-p2wi.txt [moved from Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt with 100% similarity]
Documentation/devicetree/bindings/i2c/i2c-wmt.txt [moved from Documentation/devicetree/bindings/i2c/i2c-vt8500.txt with 100% similarity]
Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
Documentation/devicetree/bindings/net/davinci_emac.txt
Documentation/devicetree/bindings/net/dsa/qca8k.txt
Documentation/devicetree/bindings/net/ethernet.txt
Documentation/devicetree/bindings/net/macb.txt
Documentation/devicetree/bindings/serial/mtk-uart.txt
Documentation/driver-api/usb/power-management.rst
Documentation/filesystems/mount_api.txt
Documentation/i2c/busses/i2c-i801
Documentation/kprobes.txt
Documentation/lzo.txt
Documentation/media/uapi/rc/rc-tables.rst
Documentation/networking/bpf_flow_dissector.rst [new file with mode: 0644]
Documentation/networking/decnet.txt
Documentation/networking/index.rst
Documentation/networking/ip-sysctl.txt
Documentation/networking/msg_zerocopy.rst
Documentation/networking/netdev-FAQ.rst
Documentation/networking/nf_flowtable.txt
Documentation/networking/rxrpc.txt
Documentation/networking/snmp_counter.rst
Documentation/sysctl/vm.txt
Documentation/translations/ko_KR/memory-barriers.txt
Documentation/virtual/kvm/api.txt
Documentation/virtual/kvm/mmu.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/Kconfig
arch/alpha/include/asm/Kbuild
arch/alpha/include/asm/rwsem.h [deleted file]
arch/alpha/include/asm/tlb.h
arch/alpha/include/uapi/asm/kvm_para.h [deleted file]
arch/alpha/kernel/syscalls/syscall.tbl
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/boot/dts/abilis_tb100.dtsi
arch/arc/boot/dts/abilis_tb100_dvk.dts
arch/arc/boot/dts/abilis_tb101.dtsi
arch/arc/boot/dts/abilis_tb101_dvk.dts
arch/arc/boot/dts/abilis_tb10x.dtsi
arch/arc/boot/dts/axc001.dtsi
arch/arc/boot/dts/axc003.dtsi
arch/arc/boot/dts/axc003_idu.dtsi
arch/arc/boot/dts/axs10x_mb.dtsi
arch/arc/boot/dts/hsdk.dts
arch/arc/boot/dts/vdk_axc003.dtsi
arch/arc/boot/dts/vdk_axc003_idu.dtsi
arch/arc/boot/dts/vdk_axs10x_mb.dtsi
arch/arc/configs/hsdk_defconfig
arch/arc/include/asm/Kbuild
arch/arc/include/asm/arcregs.h
arch/arc/include/asm/irqflags-arcv2.h
arch/arc/include/asm/perf_event.h
arch/arc/include/asm/spinlock.h
arch/arc/include/asm/syscall.h
arch/arc/include/asm/tlb.h
arch/arc/include/uapi/asm/Kbuild
arch/arc/kernel/head.S
arch/arc/kernel/intc-arcv2.c
arch/arc/kernel/setup.c
arch/arc/kernel/troubleshoot.c
arch/arc/lib/Makefile
arch/arc/lib/memcpy-archs-unaligned.S [new file with mode: 0644]
arch/arc/lib/memset-archs.S
arch/arc/mm/cache.c
arch/arc/plat-eznps/Kconfig
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/boot/compressed/head.S
arch/arm/boot/dts/am335x-evm.dts
arch/arm/boot/dts/am335x-evmsk.dts
arch/arm/boot/dts/am33xx-l4.dtsi
arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
arch/arm/boot/dts/imx6ull-pinfunc-snvs.h
arch/arm/boot/dts/rk3288-tinker.dtsi
arch/arm/boot/dts/rk3288-veyron.dtsi
arch/arm/boot/dts/rk3288.dtsi
arch/arm/boot/dts/sama5d2-pinfunc.h
arch/arm/configs/imx_v4_v5_defconfig
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/include/asm/Kbuild
arch/arm/include/asm/kvm_mmu.h
arch/arm/include/asm/stage2_pgtable.h
arch/arm/include/asm/syscall.h
arch/arm/include/asm/tlb.h
arch/arm/include/uapi/asm/Kbuild
arch/arm/include/uapi/asm/kvm_para.h [deleted file]
arch/arm/kernel/head-nommu.S
arch/arm/kernel/signal.c
arch/arm/kernel/stacktrace.c
arch/arm/mach-at91/pm.c
arch/arm/mach-cns3xxx/core.c
arch/arm/mach-imx/cpuidle-imx6q.c
arch/arm/mach-imx/mach-imx51.c
arch/arm/mach-iop13xx/setup.c
arch/arm/mach-iop13xx/tpmi.c
arch/arm/mach-milbeaut/platsmp.c
arch/arm/mach-omap1/board-ams-delta.c
arch/arm/mach-omap2/display.c
arch/arm/plat-iop/adma.c
arch/arm/plat-orion/common.c
arch/arm/tools/syscall.tbl
arch/arm64/Kconfig
arch/arm64/Kconfig.platforms
arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
arch/arm64/boot/dts/nvidia/tegra186.dtsi
arch/arm64/boot/dts/renesas/r8a774c0.dtsi
arch/arm64/boot/dts/renesas/r8a77990.dtsi
arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
arch/arm64/boot/dts/rockchip/rk3328.dtsi
arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts
arch/arm64/include/asm/Kbuild
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/futex.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/asm/module.h
arch/arm64/include/asm/syscall.h
arch/arm64/include/asm/tlb.h
arch/arm64/include/asm/unistd.h
arch/arm64/include/asm/unistd32.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/ftrace.c
arch/arm64/kernel/probes/kprobes.c
arch/arm64/kernel/sdei.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kernel/traps.c
arch/arm64/kvm/reset.c
arch/arm64/mm/init.c
arch/c6x/Kconfig
arch/c6x/include/asm/Kbuild
arch/c6x/include/asm/syscall.h
arch/c6x/include/asm/tlb.h
arch/c6x/include/uapi/asm/Kbuild
arch/csky/Kconfig
arch/csky/include/asm/syscall.h
arch/h8300/Kconfig
arch/h8300/include/asm/Kbuild
arch/h8300/include/asm/syscall.h
arch/h8300/include/asm/tlb.h
arch/h8300/include/uapi/asm/Kbuild
arch/hexagon/Kconfig
arch/hexagon/include/asm/Kbuild
arch/hexagon/include/asm/syscall.h
arch/hexagon/include/asm/tlb.h
arch/hexagon/include/uapi/asm/kvm_para.h [deleted file]
arch/ia64/Kconfig
arch/ia64/include/asm/Kbuild
arch/ia64/include/asm/machvec.h
arch/ia64/include/asm/machvec_sn2.h
arch/ia64/include/asm/rwsem.h [deleted file]
arch/ia64/include/asm/syscall.h
arch/ia64/include/asm/tlb.h
arch/ia64/include/asm/tlbflush.h
arch/ia64/include/uapi/asm/Kbuild
arch/ia64/kernel/ptrace.c
arch/ia64/kernel/setup.c
arch/ia64/kernel/syscalls/syscall.tbl
arch/ia64/mm/tlb.c
arch/ia64/sn/kernel/sn2/sn2_smp.c
arch/m68k/Kconfig
arch/m68k/include/asm/Kbuild
arch/m68k/include/asm/tlb.h
arch/m68k/include/uapi/asm/Kbuild
arch/m68k/kernel/syscalls/syscall.tbl
arch/microblaze/Kconfig
arch/microblaze/include/asm/Kbuild
arch/microblaze/include/asm/syscall.h
arch/microblaze/include/asm/tlb.h
arch/microblaze/include/uapi/asm/Kbuild
arch/microblaze/kernel/syscalls/syscall.tbl
arch/mips/Kconfig
arch/mips/ath79/setup.c
arch/mips/bcm47xx/workarounds.c
arch/mips/configs/generic/board-ocelot.config
arch/mips/include/asm/jump_label.h
arch/mips/include/asm/syscall.h
arch/mips/include/asm/tlb.h
arch/mips/include/uapi/asm/posix_types.h
arch/mips/kernel/kgdb.c
arch/mips/kernel/ptrace.c
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/syscalls/syscall_n32.tbl
arch/mips/kernel/syscalls/syscall_n64.tbl
arch/mips/kernel/syscalls/syscall_o32.tbl
arch/mips/kernel/vmlinux.lds.S
arch/mips/loongson64/lemote-2f/irq.c
arch/mips/net/ebpf_jit.c
arch/mips/sgi-ip27/ip27-irq.c
arch/nds32/Kconfig
arch/nds32/include/asm/syscall.h
arch/nds32/include/asm/tlb.h
arch/nds32/include/asm/tlbflush.h
arch/nios2/Kconfig
arch/nios2/include/asm/Kbuild
arch/nios2/include/asm/syscall.h
arch/nios2/include/asm/tlb.h
arch/nios2/include/uapi/asm/Kbuild
arch/openrisc/Kconfig
arch/openrisc/include/asm/Kbuild
arch/openrisc/include/asm/syscall.h
arch/openrisc/include/asm/tlb.h
arch/openrisc/include/uapi/asm/Kbuild
arch/parisc/Kconfig
arch/parisc/include/asm/Kbuild
arch/parisc/include/asm/ptrace.h
arch/parisc/include/asm/syscall.h
arch/parisc/include/asm/tlb.h
arch/parisc/include/uapi/asm/Kbuild
arch/parisc/kernel/process.c
arch/parisc/kernel/setup.c
arch/parisc/kernel/stacktrace.c
arch/parisc/kernel/syscalls/syscall.tbl
arch/powerpc/Kconfig
arch/powerpc/configs/skiroot_defconfig
arch/powerpc/include/asm/Kbuild
arch/powerpc/include/asm/mmu.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/include/asm/syscall.h
arch/powerpc/include/asm/tlb.h
arch/powerpc/include/asm/vdso_datapage.h
arch/powerpc/kernel/cpu_setup_6xx.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/head_32.S
arch/powerpc/kernel/kvm.c
arch/powerpc/kernel/security.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/syscalls/syscall.tbl
arch/powerpc/kernel/vdso32/gettimeofday.S
arch/powerpc/kernel/vdso64/gettimeofday.S
arch/powerpc/kvm/book3s_64_vio.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/lib/memcmp_64.S
arch/powerpc/mm/hash_low_32.S
arch/powerpc/mm/mmu_context_iommu.c
arch/powerpc/mm/ppc_mmu_32.c
arch/powerpc/net/bpf_jit.h
arch/powerpc/net/bpf_jit32.h
arch/powerpc/net/bpf_jit64.h
arch/powerpc/net/bpf_jit_comp64.c
arch/powerpc/platforms/Kconfig.cputype
arch/powerpc/platforms/pseries/pseries_energy.c
arch/powerpc/platforms/pseries/ras.c
arch/riscv/Kconfig
arch/riscv/configs/rv32_defconfig [new file with mode: 0644]
arch/riscv/include/asm/fixmap.h
arch/riscv/include/asm/syscall.h
arch/riscv/include/asm/tlb.h
arch/riscv/include/asm/uaccess.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/module.c
arch/riscv/kernel/setup.c
arch/riscv/kernel/stacktrace.c
arch/riscv/mm/Makefile
arch/riscv/mm/init.c
arch/s390/Kconfig
arch/s390/boot/mem_detect.c
arch/s390/include/asm/Kbuild
arch/s390/include/asm/ap.h
arch/s390/include/asm/elf.h
arch/s390/include/asm/lowcore.h
arch/s390/include/asm/syscall.h
arch/s390/include/asm/tlb.h
arch/s390/kernel/fpu.c
arch/s390/kernel/nospec-branch.c
arch/s390/kernel/perf_cpum_cf_diag.c
arch/s390/kernel/smp.c
arch/s390/kernel/stacktrace.c
arch/s390/kernel/syscalls/syscall.tbl
arch/s390/kernel/vtime.c
arch/s390/mm/pgalloc.c
arch/sh/Kconfig
arch/sh/boards/of-generic.c
arch/sh/include/asm/Kbuild
arch/sh/include/asm/pgalloc.h
arch/sh/include/asm/syscall_32.h
arch/sh/include/asm/syscall_64.h
arch/sh/include/asm/tlb.h
arch/sh/include/uapi/asm/Kbuild
arch/sh/kernel/stacktrace.c
arch/sh/kernel/syscalls/syscall.tbl
arch/sparc/Kconfig
arch/sparc/include/asm/Kbuild
arch/sparc/include/asm/syscall.h
arch/sparc/include/asm/tlb_32.h
arch/sparc/include/uapi/asm/kvm_para.h [deleted file]
arch/sparc/kernel/pci_sun4v.c
arch/sparc/kernel/syscalls/syscall.tbl
arch/um/include/asm/syscall-generic.h
arch/um/include/asm/tlb.h
arch/um/kernel/stacktrace.c
arch/unicore32/Kconfig
arch/unicore32/include/asm/Kbuild
arch/unicore32/include/asm/tlb.h
arch/unicore32/include/uapi/asm/Kbuild
arch/unicore32/kernel/stacktrace.c
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/boot/compressed/misc.c
arch/x86/boot/compressed/misc.h
arch/x86/boot/string.c
arch/x86/crypto/poly1305-avx2-x86_64.S
arch/x86/crypto/poly1305-sse2-x86_64.S
arch/x86/entry/entry_32.S
arch/x86/entry/vdso/vclock_gettime.c
arch/x86/events/amd/core.c
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/cstate.c
arch/x86/events/intel/ds.c
arch/x86/events/intel/lbr.c
arch/x86/events/intel/pt.c
arch/x86/events/intel/rapl.c
arch/x86/events/intel/uncore.c
arch/x86/events/intel/uncore.h
arch/x86/events/intel/uncore_snb.c
arch/x86/events/msr.c
arch/x86/events/perf_event.h
arch/x86/hyperv/hv_init.c
arch/x86/ia32/ia32_signal.c
arch/x86/include/asm/alternative-asm.h
arch/x86/include/asm/alternative.h
arch/x86/include/asm/asm.h
arch/x86/include/asm/bitops.h
arch/x86/include/asm/cpu_device_id.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/intel_ds.h
arch/x86/include/asm/kvm_emulate.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/nospec-branch.h
arch/x86/include/asm/perf_event.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/processor-cyrix.h
arch/x86/include/asm/realmode.h
arch/x86/include/asm/rwsem.h [deleted file]
arch/x86/include/asm/smap.h
arch/x86/include/asm/stacktrace.h
arch/x86/include/asm/switch_to.h
arch/x86/include/asm/syscall.h
arch/x86/include/asm/tlb.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/uaccess_64.h
arch/x86/include/asm/xen/hypercall.h
arch/x86/include/uapi/asm/kvm.h
arch/x86/include/uapi/asm/perf_regs.h
arch/x86/include/uapi/asm/vmx.h
arch/x86/kernel/aperture_64.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/cyrix.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/resctrl/monitor.c
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kernel/hpet.c
arch/x86/kernel/hw_breakpoint.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/perf_regs.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/reboot.c
arch/x86/kernel/setup.c
arch/x86/kernel/signal.c
arch/x86/kernel/stacktrace.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/kvm/emulate.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu.h
arch/x86/kvm/mmutrace.h
arch/x86/kvm/pmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/trace.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmenter.S
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/lib/Makefile
arch/x86/lib/copy_user_64.S
arch/x86/lib/csum-partial_64.c
arch/x86/lib/memcpy_64.S
arch/x86/lib/rwsem.S [deleted file]
arch/x86/lib/usercopy_64.c
arch/x86/mm/dump_pagetables.c
arch/x86/mm/init.c
arch/x86/mm/ioremap.c
arch/x86/mm/kaslr.c
arch/x86/mm/mmap.c
arch/x86/mm/pti.c
arch/x86/mm/tlb.c
arch/x86/platform/efi/quirks.c
arch/x86/realmode/init.c
arch/x86/um/Kconfig
arch/x86/um/Makefile
arch/xtensa/Kconfig
arch/xtensa/include/asm/Kbuild
arch/xtensa/include/asm/processor.h
arch/xtensa/include/asm/syscall.h
arch/xtensa/include/asm/tlb.h
arch/xtensa/include/uapi/asm/Kbuild
arch/xtensa/kernel/entry.S
arch/xtensa/kernel/stacktrace.c
arch/xtensa/kernel/syscalls/syscall.tbl
arch/xtensa/mm/mmu.c
block/bfq-iosched.c
block/bfq-iosched.h
block/bfq-wf2q.c
block/bio.c
block/blk-cgroup.c
block/blk-core.c
block/blk-flush.c
block/blk-iolatency.c
block/blk-mq-sched.c
block/blk-mq.c
block/blk-mq.h
block/blk-sysfs.c
crypto/lrw.c
crypto/testmgr.h
crypto/xts.c
drivers/acpi/acpica/nsobject.c
drivers/acpi/bus.c
drivers/acpi/cppc_acpi.c
drivers/acpi/nfit/core.c
drivers/acpi/nfit/intel.c
drivers/acpi/utils.c
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/ata/libata-zpodd.c
drivers/atm/firestream.c
drivers/auxdisplay/Kconfig
drivers/auxdisplay/Makefile
drivers/auxdisplay/charlcd.c
drivers/auxdisplay/hd44780.c
drivers/auxdisplay/panel.c
drivers/base/memory.c
drivers/base/power/domain.c
drivers/base/swnode.c
drivers/block/loop.c
drivers/block/null_blk_main.c
drivers/block/paride/pcd.c
drivers/block/paride/pf.c
drivers/block/rbd.c
drivers/block/virtio_blk.c
drivers/block/xsysace.c
drivers/block/zram/zram_drv.c
drivers/bluetooth/btusb.c
drivers/char/Kconfig
drivers/char/ipmi/ipmi_dmi.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_hardcode.c
drivers/char/tpm/eventlog/tpm2.c
drivers/char/tpm/tpm-dev-common.c
drivers/char/tpm/tpm-interface.c
drivers/clk/at91/clk-programmable.c
drivers/clk/at91/pmc.h
drivers/clk/at91/sama5d2.c
drivers/clk/clkdev.c
drivers/clk/imx/clk-pll14xx.c
drivers/clk/mediatek/clk-gate.c
drivers/clk/meson/clk-pll.c
drivers/clk/meson/g12a.c
drivers/clk/meson/gxbb.c
drivers/clk/meson/vid-pll-div.c
drivers/clk/sunxi-ng/ccu_nkmp.c
drivers/clk/x86/clk-pmc-atom.c
drivers/clocksource/Kconfig
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/clps711x-timer.c
drivers/clocksource/mips-gic-timer.c
drivers/clocksource/tcb_clksrc.c
drivers/clocksource/timer-oxnas-rps.c
drivers/clocksource/timer-riscv.c
drivers/clocksource/timer-ti-dm.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/scpi-cpufreq.c
drivers/crypto/caam/caamhash.c
drivers/dma/bcm2835-dma.c
drivers/dma/mediatek/mtk-cqdma.c
drivers/dma/sh/rcar-dmac.c
drivers/dma/stm32-mdma.c
drivers/extcon/Kconfig
drivers/firmware/dmi_scan.c
drivers/firmware/efi/arm-runtime.c
drivers/firmware/efi/libstub/Makefile
drivers/gpio/gpio-adnp.c
drivers/gpio/gpio-aspeed.c
drivers/gpio/gpio-eic-sprd.c
drivers/gpio/gpio-exar.c
drivers/gpio/gpio-mockup.c
drivers/gpio/gpiolib-of.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_file.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/dmabuf.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gtt.h
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/icl_dsi.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
drivers/gpu/drm/i915/vlv_dsi.c
drivers/gpu/drm/imx/ipuv3-crtc.c
drivers/gpu/drm/mediatek/mtk_dpi.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_drm_gem.c
drivers/gpu/drm/mediatek/mtk_drm_gem.h
drivers/gpu/drm/mediatek/mtk_hdmi.c
drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
drivers/gpu/drm/mediatek/mtk_hdmi_phy.h
drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c
drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/meson/meson_dw_hdmi.c
drivers/gpu/drm/nouveau/nouveau_debugfs.c
drivers/gpu/drm/nouveau/nouveau_dmem.c
drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
drivers/gpu/drm/qxl/qxl_drv.c
drivers/gpu/drm/qxl/qxl_prime.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
drivers/gpu/drm/sun4i/sun8i_tcon_top.c
drivers/gpu/drm/tegra/hdmi.c
drivers/gpu/drm/tegra/hub.c
drivers/gpu/drm/tegra/vic.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_memory.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/udl/udl_connector.c
drivers/gpu/drm/udl/udl_drv.c
drivers/gpu/drm/udl/udl_drv.h
drivers/gpu/drm/udl/udl_gem.c
drivers/gpu/drm/udl/udl_main.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vgem/vgem_drv.c
drivers/gpu/drm/virtio/virtgpu_drv.c
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_prime.c
drivers/gpu/drm/vkms/vkms_gem.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
drivers/gpu/host1x/hw/channel_hw.c
drivers/gpu/ipu-v3/ipu-dp.c
drivers/hid/Kconfig
drivers/hid/hid-core.c
drivers/hid/hid-debug.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-quirks.c
drivers/hid/hid-steam.c
drivers/hid/hid-uclogic-params.c
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/hwmon/Kconfig
drivers/hwmon/ntc_thermistor.c
drivers/hwmon/occ/common.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-designware-master.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-synquacer.c
drivers/i2c/i2c-core-base.c
drivers/i3c/master.c
drivers/i3c/master/dw-i3c-master.c
drivers/iio/accel/kxcjk-1013.c
drivers/iio/adc/ad_sigma_delta.c
drivers/iio/adc/at91_adc.c
drivers/iio/adc/xilinx-xadc-core.c
drivers/iio/chemical/Kconfig
drivers/iio/chemical/bme680.h
drivers/iio/chemical/bme680_core.c
drivers/iio/chemical/bme680_i2c.c
drivers/iio/chemical/bme680_spi.c
drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
drivers/iio/dac/mcp4725.c
drivers/iio/gyro/bmg160_core.c
drivers/iio/gyro/mpu3050-core.c
drivers/iio/industrialio-buffer.c
drivers/iio/industrialio-core.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/qp.c
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/hfi1/tid_rdma.c
drivers/infiniband/hw/hns/hns_roce_hem.c
drivers/infiniband/hw/hns/hns_roce_mr.c
drivers/infiniband/hw/hns/hns_roce_qp.c
drivers/infiniband/hw/i40iw/i40iw_utils.c
drivers/infiniband/hw/mlx4/alias_GUID.c
drivers/infiniband/hw/mlx5/devx.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
drivers/infiniband/sw/rdmavt/mr.c
drivers/input/keyboard/Kconfig
drivers/input/keyboard/snvs_pwrkey.c
drivers/input/mouse/elan_i2c_core.c
drivers/input/rmi4/rmi_driver.c
drivers/input/rmi4/rmi_f11.c
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_types.h
drivers/iommu/intel-iommu.c
drivers/iommu/io-pgtable-arm-v7s.c
drivers/iommu/iommu.c
drivers/iommu/iova.c
drivers/irqchip/irq-ath79-misc.c
drivers/irqchip/irq-brcmstb-l2.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic.c
drivers/irqchip/irq-imx-irqsteer.c
drivers/irqchip/irq-ls1x.c
drivers/irqchip/irq-mbigen.c
drivers/irqchip/irq-mmp.c
drivers/irqchip/irq-mvebu-sei.c
drivers/irqchip/irq-stm32-exti.c
drivers/isdn/hardware/mISDN/hfcmulti.c
drivers/isdn/mISDN/socket.c
drivers/leds/leds-pca9532.c
drivers/leds/trigger/ledtrig-netdev.c
drivers/lightnvm/pblk-read.c
drivers/md/dm-bufio.c
drivers/md/dm-core.h
drivers/md/dm-init.c
drivers/md/dm-integrity.c
drivers/md/dm-rq.c
drivers/md/dm-table.c
drivers/md/dm.c
drivers/md/persistent-data/dm-block-manager.c
drivers/mfd/Kconfig
drivers/mfd/sprd-sc27xx-spi.c
drivers/mfd/twl-core.c
drivers/misc/fastrpc.c
drivers/misc/habanalabs/command_submission.c
drivers/misc/habanalabs/debugfs.c
drivers/misc/habanalabs/device.c
drivers/misc/habanalabs/goya/goya.c
drivers/misc/habanalabs/habanalabs.h
drivers/misc/habanalabs/hw_queue.c
drivers/misc/habanalabs/memory.c
drivers/misc/habanalabs/mmu.c
drivers/mmc/host/alcor.c
drivers/mmc/host/davinci_mmc.c
drivers/mmc/host/mxcmmc.c
drivers/mmc/host/pxamci.c
drivers/mmc/host/renesas_sdhi_core.c
drivers/mmc/host/sdhci-omap.c
drivers/mtd/chips/cfi_cmdset_0002.c
drivers/mtd/nand/raw/marvell_nand.c
drivers/net/Kconfig
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_sysfs_slave.c
drivers/net/dsa/bcm_sf2_cfp.c
drivers/net/dsa/mv88e6xxx/port.c
drivers/net/dsa/qca8k.c
drivers/net/dsa/qca8k.h
drivers/net/ethernet/3com/3c515.c
drivers/net/ethernet/8390/mac8390.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/atheros/atlx/atl1.c
drivers/net/ethernet/atheros/atlx/atl1.h
drivers/net/ethernet/atheros/atlx/atl2.c
drivers/net/ethernet/atheros/atlx/atl2.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns/hnae.c
drivers/net/ethernet/hisilicon/hns/hnae.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
drivers/net/ethernet/hisilicon/hns_mdio.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/igb/e1000_defines.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/port.c
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
drivers/net/ethernet/mellanox/mlx5/core/en_common.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/qp.c
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/core_env.c
drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/micrel/ks8851.c
drivers/net/ethernet/micrel/ks8851.h
drivers/net/ethernet/micrel/ks8851_mll.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/neterion/vxge/vxge-config.c
drivers/net/ethernet/netronome/nfp/abm/cls.c
drivers/net/ethernet/netronome/nfp/flower/action.c
drivers/net/ethernet/netronome/nfp/flower/cmsg.h
drivers/net/ethernet/netronome/nfp/flower/match.c
drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_int.c
drivers/net/ethernet/qlogic/qed/qed_int.h
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qede/qede_ptp.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/realtek/atp.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sis/sis900.c
drivers/net/ethernet/socionext/netsec.c
drivers/net/ethernet/stmicro/stmmac/descs_com.h
drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
drivers/net/ethernet/stmicro/stmmac/hwif.h
drivers/net/ethernet/stmicro/stmmac/norm_desc.c
drivers/net/ethernet/stmicro/stmmac/ring_mode.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/ethernet/ti/netcp_ethss.c
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/ieee802154/adf7242.c
drivers/net/ieee802154/mac802154_hwsim.c
drivers/net/ieee802154/mcr20a.c
drivers/net/phy/Kconfig
drivers/net/phy/broadcom.c
drivers/net/phy/dp83822.c
drivers/net/phy/marvell.c
drivers/net/phy/meson-gxl.c
drivers/net/phy/phy_device.c
drivers/net/phy/spi_ks8995.c
drivers/net/slip/slhc.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/aqc111.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/qmi_wwan.c
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath10k/ce.c
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/coredump.c
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath10k/pci.h
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/intel/iwlwifi/cfg/22000.c
drivers/net/wireless/intel/iwlwifi/cfg/5000.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/fw/file.h
drivers/net/wireless/intel/iwlwifi/fw/init.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/marvell/mwifiex/sdio.c
drivers/net/wireless/mediatek/mt76/dma.c
drivers/net/wireless/mediatek/mt76/mac80211.c
drivers/net/wireless/mediatek/mt76/mt76.h
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
drivers/net/wireless/mediatek/mt76/mt7603/dma.c
drivers/net/wireless/mediatek/mt76/mt7603/init.c
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
drivers/net/wireless/mediatek/mt76/mt7603/main.c
drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
drivers/net/wireless/mediatek/mt76/mt7603/soc.c
drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
drivers/net/wireless/mediatek/mt76/mt76x02.h
drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
drivers/net/wireless/mediatek/mt76/mt76x2/init.c
drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
drivers/net/wireless/mediatek/mt76/tx.c
drivers/net/wireless/mediatek/mt76/usb.c
drivers/net/wireless/mediatek/mt7601u/usb.c
drivers/net/wireless/ralink/rt2x00/rt2x00.h
drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
drivers/nfc/st95hf/core.c
drivers/nvdimm/btt_devs.c
drivers/nvdimm/namespace_devs.c
drivers/nvdimm/pmem.c
drivers/nvdimm/security.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/multipath.c
drivers/nvme/host/tcp.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/core.c
drivers/nvme/target/discovery.c
drivers/nvme/target/io-cmd-file.c
drivers/nvme/target/nvmet.h
drivers/of/of_net.c
drivers/parisc/iosapic.c
drivers/parport/daisy.c
drivers/parport/probe.c
drivers/parport/share.c
drivers/pci/hotplug/pciehp_ctrl.c
drivers/pci/pci.c
drivers/pci/pci.h
drivers/pci/pcie/Kconfig
drivers/pci/pcie/Makefile
drivers/pci/pcie/bw_notification.c
drivers/pci/pcie/portdrv.h
drivers/pci/pcie/portdrv_core.c
drivers/pci/probe.c
drivers/pci/quirks.c
drivers/phy/allwinner/phy-sun4i-usb.c
drivers/platform/chrome/cros_ec_debugfs.c
drivers/platform/chrome/wilco_ec/mailbox.c
drivers/platform/x86/pmc_atom.c
drivers/power/supply/cpcap-battery.c
drivers/power/supply/goldfish_battery.c
drivers/power/supply/power_supply_sysfs.c
drivers/reset/reset-meson-audio-arb.c
drivers/rtc/Kconfig
drivers/rtc/rtc-cros-ec.c
drivers/rtc/rtc-da9063.c
drivers/rtc/rtc-sh.c
drivers/s390/block/dasd_eckd.c
drivers/s390/char/con3270.c
drivers/s390/char/fs3270.c
drivers/s390/char/raw3270.c
drivers/s390/char/raw3270.h
drivers/s390/char/tty3270.c
drivers/s390/cio/chsc.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/ap_queue.c
drivers/s390/crypto/pkey_api.c
drivers/s390/crypto/zcrypt_api.c
drivers/s390/net/ctcm_main.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/scsi/zfcp_erp.c
drivers/s390/scsi/zfcp_ext.h
drivers/s390/scsi/zfcp_fc.c
drivers/s390/scsi/zfcp_scsi.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/aacraid/commsup.c
drivers/scsi/aic7xxx/aic7770_osm.c
drivers/scsi/aic7xxx/aic7xxx.h
drivers/scsi/aic7xxx/aic7xxx_osm.c
drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
drivers/scsi/csiostor/csio_scsi.c
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvfc.h
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/qedi/qedi_main.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_dh.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/storvsc_drv.c
drivers/scsi/virtio_scsi.c
drivers/soc/bcm/bcm2835-power.c
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/staging/axis-fifo/Kconfig
drivers/staging/comedi/comedidev.h
drivers/staging/comedi/drivers.c
drivers/staging/comedi/drivers/ni_mio_common.c
drivers/staging/comedi/drivers/ni_usb6501.c
drivers/staging/comedi/drivers/vmk80xx.c
drivers/staging/erofs/data.c
drivers/staging/erofs/dir.c
drivers/staging/erofs/unzip_vle.c
drivers/staging/erofs/unzip_vle_lz4.c
drivers/staging/iio/adc/ad7192.c
drivers/staging/iio/meter/ade7854.c
drivers/staging/most/core.c
drivers/staging/mt7621-dts/gbpc1.dts
drivers/staging/mt7621-dts/mt7621.dtsi
drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt [deleted file]
drivers/staging/mt7621-eth/Kconfig [deleted file]
drivers/staging/mt7621-eth/Makefile [deleted file]
drivers/staging/mt7621-eth/TODO [deleted file]
drivers/staging/mt7621-eth/ethtool.c [deleted file]
drivers/staging/mt7621-eth/ethtool.h [deleted file]
drivers/staging/mt7621-eth/gsw_mt7620.h [deleted file]
drivers/staging/mt7621-eth/gsw_mt7621.c [deleted file]
drivers/staging/mt7621-eth/mdio.c [deleted file]
drivers/staging/mt7621-eth/mdio.h [deleted file]
drivers/staging/mt7621-eth/mdio_mt7620.c [deleted file]
drivers/staging/mt7621-eth/mtk_eth_soc.c [deleted file]
drivers/staging/mt7621-eth/mtk_eth_soc.h [deleted file]
drivers/staging/mt7621-eth/soc_mt7621.c [deleted file]
drivers/staging/mt7621-pci/Kconfig
drivers/staging/octeon/ethernet-mdio.c
drivers/staging/octeon/ethernet.c
drivers/staging/octeon/octeon-ethernet.h
drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
drivers/staging/rtl8188eu/core/rtw_xmit.c
drivers/staging/rtl8188eu/include/rtw_xmit.h
drivers/staging/rtl8712/rtl8712_cmd.c
drivers/staging/rtl8712/rtl8712_cmd.h
drivers/staging/rtl8723bs/core/rtw_xmit.c
drivers/staging/rtl8723bs/include/rtw_xmit.h
drivers/staging/rtlwifi/phydm/rtl_phydm.c
drivers/staging/rtlwifi/rtl8822be/fw.c
drivers/staging/speakup/speakup_soft.c
drivers/staging/speakup/spk_priv.h
drivers/staging/speakup/synth.c
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
drivers/staging/vt6655/device_main.c
drivers/thermal/broadcom/bcm2835_thermal.c
drivers/thermal/cpu_cooling.c
drivers/thermal/intel/int340x_thermal/int3400_thermal.c
drivers/thermal/intel/intel_powerclamp.c
drivers/thermal/mtk_thermal.c
drivers/thermal/samsung/exynos_tmu.c
drivers/tty/rocket.c
drivers/tty/serial/ar933x_uart.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/kgdboc.c
drivers/tty/serial/max310x.c
drivers/tty/serial/mvebu-uart.c
drivers/tty/serial/mxs-auart.c
drivers/tty/serial/qcom_geni_serial.c
drivers/tty/serial/sc16is7xx.c
drivers/tty/serial/sh-sci.c
drivers/tty/tty_port.c
drivers/tty/vt/vt.c
drivers/usb/class/cdc-acm.c
drivers/usb/common/common.c
drivers/usb/core/driver.c
drivers/usb/core/hcd.c
drivers/usb/core/message.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/gadget/function/f_hid.c
drivers/usb/gadget/udc/dummy_hcd.c
drivers/usb/gadget/udc/net2272.c
drivers/usb/gadget/udc/net2280.c
drivers/usb/host/u132-hcd.c
drivers/usb/host/xhci-dbgcap.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-rcar.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.h
drivers/usb/misc/usb251xb.c
drivers/usb/misc/yurex.c
drivers/usb/mtu3/Kconfig
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/mos7720.c
drivers/usb/serial/option.c
drivers/usb/storage/realtek_cr.c
drivers/usb/typec/tcpm/tcpm.c
drivers/usb/typec/tcpm/wcove.c
drivers/usb/usbip/stub_rx.c
drivers/usb/usbip/usbip_common.h
drivers/vfio/pci/vfio_pci.c
drivers/vfio/vfio_iommu_spapr_tce.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/vhost.c
drivers/video/fbdev/efifb.c
drivers/virt/vboxguest/vboxguest_core.c
drivers/virt/vboxguest/vboxguest_core.h
drivers/virt/vboxguest/vboxguest_linux.c
drivers/virt/vboxguest/vboxguest_utils.c
drivers/virt/vboxguest/vboxguest_version.h
drivers/virt/vboxguest/vmmdev.h
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_ring.c
drivers/w1/masters/ds2490.c
drivers/xen/privcmd-buf.c
drivers/xen/xenbus/xenbus_dev_frontend.c
fs/afs/callback.c
fs/afs/cmservice.c
fs/afs/fsclient.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/rxrpc.c
fs/afs/server.c
fs/afs/write.c
fs/afs/yfsclient.c
fs/aio.c
fs/block_dev.c
fs/btrfs/extent-tree.c
fs/btrfs/file-item.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ordered-data.c
fs/btrfs/props.c
fs/btrfs/qgroup.c
fs/btrfs/raid56.c
fs/btrfs/ref-verify.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/ceph/dir.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/snap.c
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/smb2file.c
fs/cifs/smb2maperror.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2proto.h
fs/cifs/trace.h
fs/dax.c
fs/debugfs/inode.c
fs/ext4/ext4_jbd2.h
fs/ext4/file.c
fs/ext4/indirect.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/resize.c
fs/ext4/super.c
fs/fs_parser.c
fs/fuse/dev.c
fs/hugetlbfs/inode.c
fs/inode.c
fs/io_uring.c
fs/iomap.c
fs/jffs2/readinode.c
fs/jffs2/super.c
fs/lockd/host.c
fs/locks.c
fs/nfs/client.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/nfs42proc.c
fs/nfs/nfs4file.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4xdr.c
fs/nfs/super.c
fs/nfsd/nfs3proc.c
fs/nfsd/nfs3xdr.c
fs/nfsd/nfs4callback.c
fs/nfsd/nfs4state.c
fs/nfsd/state.h
fs/notify/fanotify/fanotify.c
fs/notify/fanotify/fanotify_user.c
fs/notify/inotify/inotify_user.c
fs/notify/mark.c
fs/ocfs2/refcounttree.c
fs/open.c
fs/pipe.c
fs/proc/base.c
fs/proc/kcore.c
fs/proc/proc_sysctl.c
fs/proc/task_mmu.c
fs/read_write.c
fs/splice.c
fs/super.c
fs/ubifs/super.c
fs/udf/inode.c
fs/udf/truncate.c
fs/udf/udfdecl.h
fs/ufs/util.h
fs/userfaultfd.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/scrub/btree.c
fs/xfs/scrub/dabtree.c
fs/xfs/xfs_discard.c
fs/xfs/xfs_file.c
include/acpi/acoutput.h
include/acpi/platform/aclinux.h
include/asm-generic/rwsem.h [deleted file]
include/asm-generic/syscall.h
include/asm-generic/tlb.h
include/drm/drm_modeset_helper_vtables.h
include/drm/ttm/ttm_bo_driver.h
include/dt-bindings/clock/sifive-fu540-prci.h [new file with mode: 0644]
include/dt-bindings/reset/amlogic,meson-g12a-reset.h
include/keys/trusted.h
include/linux/atalk.h
include/linux/bio.h
include/linux/bitrev.h
include/linux/blk-mq.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/bpf.h
include/linux/bpf_verifier.h
include/linux/brcmphy.h
include/linux/bvec.h
include/linux/ceph/libceph.h
include/linux/clk.h
include/linux/compiler.h
include/linux/cpu.h
include/linux/device.h
include/linux/dmi.h
include/linux/efi.h
include/linux/elevator.h
include/linux/etherdevice.h
include/linux/fs.h
include/linux/ftrace.h
include/linux/hugetlb.h
include/linux/irq.h
include/linux/irqchip/arm-gic.h
include/linux/jump_label_ratelimit.h
include/linux/kcore.h
include/linux/kernel.h
include/linux/kprobes.h
include/linux/kvm_host.h
include/linux/list.h
include/linux/lockdep.h
include/linux/memcontrol.h
include/linux/mii.h
include/linux/mlx5/driver.h
include/linux/mlx5/qp.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/net.h
include/linux/netdevice.h
include/linux/nvme.h
include/linux/page-isolation.h
include/linux/parport.h
include/linux/perf_event.h
include/linux/pipe_fs_i.h
include/linux/platform_data/gpio/gpio-amd-fch.h
include/linux/platform_data/x86/clk-pmc-atom.h
include/linux/ptrace.h
include/linux/rcupdate.h
include/linux/rcuwait.h
include/linux/rwsem-spinlock.h [deleted file]
include/linux/rwsem.h
include/linux/sbitmap.h
include/linux/sched.h
include/linux/sched/mm.h
include/linux/sched/signal.h
include/linux/sched/topology.h
include/linux/shmem_fs.h
include/linux/slab.h
include/linux/smpboot.h
include/linux/socket.h
include/linux/srcu.h
include/linux/stackdepot.h
include/linux/stacktrace.h
include/linux/string.h
include/linux/sunrpc/sched.h
include/linux/uaccess.h
include/linux/uio.h
include/linux/usb.h
include/linux/vbox_utils.h
include/linux/virtio_ring.h
include/misc/charlcd.h
include/net/act_api.h
include/net/af_rxrpc.h
include/net/cfg80211.h
include/net/ip.h
include/net/mac80211.h
include/net/net_namespace.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_conntrack_l4proto.h
include/net/netns/hash.h
include/net/netrom.h
include/net/nfc/nci_core.h
include/net/sch_generic.h
include/net/sctp/checksum.h
include/net/sctp/command.h
include/net/sock.h
include/net/tc_act/tc_gact.h
include/net/tls.h
include/net/xdp_sock.h
include/net/xfrm.h
include/sound/soc.h
include/trace/events/syscalls.h
include/uapi/linux/Kbuild
include/uapi/linux/bpf.h
include/uapi/linux/ethtool.h
include/uapi/linux/input-event-codes.h
include/uapi/linux/vbox_vmmdev_types.h
include/uapi/rdma/mlx5-abi.h
include/uapi/sound/asound.h
init/main.c
kernel/Kconfig.locks
kernel/Makefile
kernel/backtracetest.c
kernel/bpf/cpumap.c
kernel/bpf/inode.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cgroup/cpuset.c
kernel/cpu.c
kernel/dma/debug.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/futex.c
kernel/iomem.c
kernel/irq/chip.c
kernel/irq/devres.c
kernel/irq/irqdesc.c
kernel/irq/manage.c
kernel/irq/timings.c
kernel/irq_work.c
kernel/jump_label.c
kernel/kexec_core.c
kernel/kprobes.c
kernel/latencytop.c
kernel/livepatch/transition.c
kernel/locking/Makefile
kernel/locking/lock_events.c [new file with mode: 0644]
kernel/locking/lock_events.h [new file with mode: 0644]
kernel/locking/lock_events_list.h [new file with mode: 0644]
kernel/locking/lockdep.c
kernel/locking/lockdep_internals.h
kernel/locking/locktorture.c
kernel/locking/percpu-rwsem.c
kernel/locking/qspinlock.c
kernel/locking/qspinlock_paravirt.h
kernel/locking/qspinlock_stat.h
kernel/locking/rwsem-spinlock.c [deleted file]
kernel/locking/rwsem-xadd.c
kernel/locking/rwsem.c
kernel/locking/rwsem.h
kernel/power/Kconfig
kernel/power/hibernate.c
kernel/power/suspend.c
kernel/ptrace.c
kernel/rcu/rcu.h
kernel/rcu/rcuperf.c
kernel/rcu/rcutorture.c
kernel/rcu/srcutiny.c
kernel/rcu/srcutree.c
kernel/rcu/tiny.c
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_exp.h
kernel/rcu/tree_plugin.h
kernel/rcu/tree_stall.h [new file with mode: 0644]
kernel/rcu/update.c
kernel/resource.c
kernel/rseq.c
kernel/sched/core.c
kernel/sched/cpufreq.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/deadline.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/isolation.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/sched/topology.c
kernel/seccomp.c
kernel/signal.c
kernel/stacktrace.c
kernel/sysctl.c
kernel/time/alarmtimer.c
kernel/time/jiffies.c
kernel/time/sched_clock.c
kernel/time/tick-common.c
kernel/time/tick-sched.c
kernel/time/timekeeping.h
kernel/torture.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_branch.c
kernel/trace/trace_dynevent.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_stack.c
kernel/trace/trace_syscalls.c
kernel/watchdog.c
kernel/watchdog_hld.c
kernel/workqueue.c
kernel/workqueue_internal.h
lib/Kconfig
lib/Kconfig.debug
lib/Makefile
lib/fault-inject.c
lib/iov_iter.c
lib/lzo/lzo1x_compress.c
lib/lzo/lzo1x_decompress_safe.c
lib/rhashtable.c
lib/sbitmap.c
lib/stackdepot.c
lib/string.c
lib/strncpy_from_user.c
lib/strnlen_user.c
lib/syscall.c
lib/test_vmalloc.c
lib/ubsan.c
lib/ubsan.h
mm/compaction.c
mm/debug.c
mm/gup.c
mm/huge_memory.c
mm/hugetlb.c
mm/kasan/Makefile
mm/kasan/common.c
mm/kasan/kasan.h
mm/kasan/report.c
mm/kmemleak.c
mm/madvise.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/migrate.c
mm/mmap.c
mm/mmu_gather.c
mm/page_alloc.c
mm/page_isolation.c
mm/page_owner.c
mm/percpu.c
mm/shmem.c
mm/slab.c
mm/slab.h
mm/slab_common.c
mm/slub.c
mm/sparse.c
mm/swapfile.c
mm/util.c
mm/vmscan.c
mm/vmstat.c
net/8021q/vlan_dev.c
net/appletalk/aarp.c
net/appletalk/ddp.c
net/atm/lec.c
net/batman-adv/bat_v_elp.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/sysfs.c
net/batman-adv/translation-table.c
net/bluetooth/sco.c
net/bridge/br_input.c
net/bridge/br_multicast.c
net/bridge/br_netfilter_hooks.c
net/bridge/br_netfilter_ipv6.c
net/bridge/br_netlink.c
net/bridge/netfilter/ebtables.c
net/ceph/ceph_common.c
net/ceph/messenger.c
net/ceph/mon_client.c
net/core/datagram.c
net/core/dev.c
net/core/devlink.c
net/core/ethtool.c
net/core/failover.c
net/core/filter.c
net/core/flow_dissector.c
net/core/net-sysfs.c
net/core/net_namespace.c
net/core/ptp_classifier.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/dccp/feat.c
net/dccp/ipv6.c
net/dsa/tag_qca.c
net/ipv4/esp4.c
net/ipv4/esp4_offload.c
net/ipv4/fou.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_options.c
net/ipv4/ip_output.c
net/ipv4/ip_vti.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_dctcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/udp_offload.c
net/ipv4/xfrm4_policy.c
net/ipv6/addrlabel.c
net/ipv6/esp6_offload.c
net/ipv6/ila/ila_xlat.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/netfilter/ip6t_srh.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/xfrm6_tunnel.c
net/kcm/kcmsock.c
net/key/af_key.c
net/l2tp/l2tp_core.c
net/llc/af_llc.c
net/mac80211/debugfs_netdev.c
net/mac80211/driver-ops.h
net/mac80211/ht.c
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/mesh_pathtbl.c
net/mac80211/rx.c
net/mac80211/trace_msg.h
net/mac80211/tx.c
net/mpls/mpls_iptunnel.c
net/ncsi/ncsi-netlink.c
net/ncsi/ncsi-rsp.c
net/netfilter/Kconfig
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_conntrack_proto_icmp.c
net/netfilter/nf_conntrack_proto_icmpv6.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_objref.c
net/netfilter/nft_redir.c
net/netfilter/nft_set_rbtree.c
net/netfilter/xt_time.c
net/netlink/af_netlink.c
net/netlink/genetlink.c
net/netrom/af_netrom.c
net/netrom/nr_loopback.c
net/netrom/nr_route.c
net/netrom/sysctl_net_netrom.c
net/nfc/llcp_sock.c
net/nfc/nci/hci.c
net/openvswitch/datapath.c
net/openvswitch/flow_netlink.c
net/packet/af_packet.c
net/rds/af_rds.c
net/rds/bind.c
net/rds/ib_fmr.c
net/rds/ib_rdma.c
net/rds/ib_recv.c
net/rds/tcp.c
net/rose/rose_loopback.c
net/rose/rose_subr.c
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/call_object.c
net/rxrpc/conn_event.c
net/rxrpc/input.c
net/rxrpc/local_object.c
net/rxrpc/output.c
net/rxrpc/peer_event.c
net/rxrpc/sendmsg.c
net/sched/Kconfig
net/sched/act_api.c
net/sched/act_bpf.c
net/sched/act_connmark.c
net/sched/act_csum.c
net/sched/act_gact.c
net/sched/act_ife.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/act_nat.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sched/act_sample.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/act_skbmod.c
net/sched/act_tunnel_key.c
net/sched/act_vlan.c
net/sched/cls_api.c
net/sched/cls_matchall.c
net/sched/sch_cake.c
net/sched/sch_cbq.c
net/sched/sch_drr.c
net/sched/sch_hfsc.c
net/sched/sch_htb.c
net/sched/sch_mq.c
net/sched/sch_mqprio.c
net/sched/sch_multiq.c
net/sched/sch_prio.c
net/sched/sch_qfq.c
net/sched/sch_red.c
net/sched/sch_sfb.c
net/sched/sch_taprio.c
net/sched/sch_tbf.c
net/sctp/protocol.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/smc/af_smc.c
net/smc/smc_close.c
net/smc/smc_close.h
net/smc/smc_ism.c
net/smc/smc_pnet.c
net/socket.c
net/strparser/strparser.c
net/sunrpc/cache.c
net/sunrpc/clnt.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtsock.c
net/tipc/group.c
net/tipc/link.c
net/tipc/name_table.c
net/tipc/net.c
net/tipc/netlink_compat.c
net/tipc/node.c
net/tipc/socket.c
net/tipc/sysctl.c
net/tipc/topsrv.c
net/tls/tls_device.c
net/tls/tls_device_fallback.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/scan.c
net/wireless/util.c
net/xdp/xdp_umem.c
net/xfrm/xfrm_interface.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
scripts/Makefile.build
scripts/Makefile.ubsan
scripts/atomic/gen-atomics.sh
scripts/checkpatch.pl
scripts/coccinelle/api/stream_open.cocci [new file with mode: 0644]
scripts/coccinelle/free/put_device.cocci
scripts/coccinelle/misc/badty.cocci
scripts/kconfig/lxdialog/inputbox.c
scripts/kconfig/nconf.c
scripts/kconfig/nconf.gui.c
scripts/mod/modpost.c
scripts/selinux/genheaders/genheaders.c
scripts/selinux/mdp/mdp.c
security/Kconfig
security/apparmor/apparmorfs.c
security/apparmor/lsm.c
security/device_cgroup.c
security/inode.c
security/keys/trusted.c
security/selinux/include/classmap.h
security/selinux/ss/policydb.c
security/yama/yama_lsm.c
sound/core/info.c
sound/core/init.c
sound/core/oss/pcm_oss.c
sound/core/pcm_native.c
sound/core/rawmidi.c
sound/core/seq/oss/seq_oss_synth.c
sound/core/seq/seq_clientmgr.c
sound/drivers/opl3/opl3_voice.h
sound/firewire/motu/motu.c
sound/hda/ext/hdac_ext_bus.c
sound/hda/hdac_bus.c
sound/hda/hdac_component.c
sound/isa/sb/sb8.c
sound/pci/echoaudio/echoaudio.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/Kconfig
sound/soc/codecs/ab8500-codec.c
sound/soc/codecs/cs35l35.c
sound/soc/codecs/cs4270.c
sound/soc/codecs/hdac_hda.c
sound/soc/codecs/hdac_hda.h
sound/soc/codecs/hdmi-codec.c
sound/soc/codecs/nau8810.c
sound/soc/codecs/nau8824.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/tlv320aic32x4-i2c.c
sound/soc/codecs/tlv320aic32x4-spi.c
sound/soc/codecs/tlv320aic32x4.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/wm_adsp.c
sound/soc/codecs/wm_adsp.h
sound/soc/fsl/fsl_asrc.c
sound/soc/fsl/fsl_esai.c
sound/soc/generic/audio-graph-card.c
sound/soc/generic/simple-card.c
sound/soc/intel/atom/sst-mfld-platform-pcm.c
sound/soc/intel/boards/cht_bsw_max98090_ti.c
sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
sound/soc/intel/skylake/skl-messages.c
sound/soc/intel/skylake/skl-pcm.c
sound/soc/mediatek/common/mtk-btcvsd.c
sound/soc/mediatek/mt8183/mt8183-afe-clk.c
sound/soc/rockchip/rockchip_pdm.c
sound/soc/samsung/i2s.c
sound/soc/samsung/odroid.c
sound/soc/sh/rcar/core.c
sound/soc/sh/rcar/rsnd.h
sound/soc/sh/rcar/src.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/soc-pcm.c
sound/soc/soc-topology.c
sound/soc/stm/stm32_adfsdm.c
sound/soc/stm/stm32_i2s.c
sound/soc/stm/stm32_sai.c
sound/soc/stm/stm32_sai_sub.c
sound/usb/line6/driver.c
sound/usb/line6/podhd.c
sound/usb/line6/toneport.c
sound/xen/xen_snd_front_alsa.c
tools/arch/alpha/include/uapi/asm/mman.h
tools/arch/arc/include/uapi/asm/unistd.h [new file with mode: 0644]
tools/arch/arm64/include/uapi/asm/unistd.h
tools/arch/hexagon/include/uapi/asm/unistd.h [new file with mode: 0644]
tools/arch/mips/include/uapi/asm/mman.h
tools/arch/parisc/include/uapi/asm/mman.h
tools/arch/powerpc/include/uapi/asm/kvm.h
tools/arch/riscv/include/uapi/asm/unistd.h [new file with mode: 0644]
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/uapi/asm/vmx.h
tools/arch/xtensa/include/uapi/asm/mman.h
tools/bpf/bpftool/map.c
tools/bpf/bpftool/prog.c
tools/build/Makefile.feature
tools/build/feature/Makefile
tools/build/feature/test-all.c
tools/build/feature/test-libopencsd.c
tools/build/feature/test-libzstd.c [new file with mode: 0644]
tools/include/uapi/asm-generic/mman-common-tools.h [new file with mode: 0644]
tools/include/uapi/asm-generic/mman-common.h
tools/include/uapi/asm-generic/mman.h
tools/include/uapi/asm-generic/unistd.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/bpf.h
tools/include/uapi/linux/fcntl.h
tools/include/uapi/linux/in.h
tools/include/uapi/linux/mman.h
tools/include/uapi/sound/asound.h
tools/io_uring/io_uring-bench.c
tools/lib/bpf/.gitignore
tools/lib/bpf/Makefile
tools/lib/bpf/README.rst
tools/lib/bpf/btf.c
tools/lib/bpf/libbpf.c
tools/lib/bpf/libbpf.h
tools/lib/bpf/libbpf.map
tools/lib/bpf/xsk.c
tools/lib/traceevent/event-parse-api.c
tools/lib/traceevent/event-parse-local.h
tools/lib/traceevent/event-parse.c
tools/lib/traceevent/event-parse.h
tools/lib/traceevent/event-plugin.c
tools/lib/traceevent/kbuffer-parse.c
tools/lib/traceevent/kbuffer.h
tools/lib/traceevent/parse-filter.c
tools/lib/traceevent/parse-utils.c
tools/lib/traceevent/plugin_cfg80211.c
tools/lib/traceevent/plugin_function.c
tools/lib/traceevent/plugin_hrtimer.c
tools/lib/traceevent/plugin_jbd2.c
tools/lib/traceevent/plugin_kmem.c
tools/lib/traceevent/plugin_kvm.c
tools/lib/traceevent/plugin_mac80211.c
tools/lib/traceevent/plugin_sched_switch.c
tools/lib/traceevent/plugin_scsi.c
tools/lib/traceevent/plugin_xen.c
tools/memory-model/Documentation/explanation.txt
tools/memory-model/README
tools/memory-model/linux-kernel.bell
tools/memory-model/linux-kernel.cat
tools/memory-model/linux-kernel.def
tools/memory-model/lock.cat
tools/objtool/Makefile
tools/objtool/arch.h
tools/objtool/arch/x86/decode.c
tools/objtool/builtin-check.c
tools/objtool/builtin.h
tools/objtool/check.c
tools/objtool/check.h
tools/objtool/elf.c
tools/objtool/elf.h
tools/objtool/special.c
tools/objtool/special.h
tools/objtool/warn.h
tools/perf/Documentation/Build.txt
tools/perf/Documentation/perf-config.txt
tools/perf/Documentation/perf-record.txt
tools/perf/Documentation/perf-report.txt
tools/perf/Documentation/perf-script.txt
tools/perf/Documentation/perf-stat.txt
tools/perf/Documentation/tips.txt
tools/perf/Makefile.config
tools/perf/Makefile.perf
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
tools/perf/arch/x86/util/Build
tools/perf/arch/x86/util/archinsn.c [new file with mode: 0644]
tools/perf/bench/epoll-ctl.c
tools/perf/bench/epoll-wait.c
tools/perf/bench/numa.c
tools/perf/builtin-kmem.c
tools/perf/builtin-list.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/builtin-version.c
tools/perf/builtin.h
tools/perf/check-headers.sh
tools/perf/examples/bpf/augmented_raw_syscalls.c
tools/perf/perf.c
tools/perf/perf.h
tools/perf/pmu-events/arch/powerpc/power8/other.json
tools/perf/pmu-events/arch/s390/cf_z14/extended.json
tools/perf/pmu-events/arch/x86/amdfam17h/branch.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/amdfam17h/cache.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/amdfam17h/core.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/amdfam17h/memory.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/amdfam17h/other.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/bonnell/frontend.json
tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
tools/perf/pmu-events/arch/x86/broadwell/cache.json
tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
tools/perf/pmu-events/arch/x86/broadwell/frontend.json
tools/perf/pmu-events/arch/x86/broadwell/memory.json
tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
tools/perf/pmu-events/arch/x86/broadwellde/cache.json
tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
tools/perf/pmu-events/arch/x86/broadwellx/cache.json
tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
tools/perf/pmu-events/arch/x86/broadwellx/memory.json
tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
tools/perf/pmu-events/arch/x86/goldmont/cache.json
tools/perf/pmu-events/arch/x86/goldmont/memory.json
tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
tools/perf/pmu-events/arch/x86/haswell/cache.json
tools/perf/pmu-events/arch/x86/haswell/floating-point.json
tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
tools/perf/pmu-events/arch/x86/haswell/memory.json
tools/perf/pmu-events/arch/x86/haswell/pipeline.json
tools/perf/pmu-events/arch/x86/haswellx/cache.json
tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
tools/perf/pmu-events/arch/x86/haswellx/memory.json
tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
tools/perf/pmu-events/arch/x86/ivybridge/cache.json
tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
tools/perf/pmu-events/arch/x86/jaketown/cache.json
tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
tools/perf/pmu-events/arch/x86/knightslanding/cache.json
tools/perf/pmu-events/arch/x86/knightslanding/memory.json
tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
tools/perf/pmu-events/arch/x86/mapfile.csv
tools/perf/pmu-events/arch/x86/sandybridge/cache.json
tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json
tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
tools/perf/pmu-events/arch/x86/sandybridge/memory.json
tools/perf/pmu-events/arch/x86/sandybridge/other.json
tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json
tools/perf/pmu-events/arch/x86/silvermont/cache.json
tools/perf/pmu-events/arch/x86/silvermont/other.json [new file with mode: 0644]
tools/perf/pmu-events/arch/x86/silvermont/pipeline.json
tools/perf/pmu-events/arch/x86/skylake/cache.json
tools/perf/pmu-events/arch/x86/skylake/frontend.json
tools/perf/pmu-events/arch/x86/skylake/memory.json
tools/perf/pmu-events/arch/x86/skylake/pipeline.json
tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
tools/perf/pmu-events/arch/x86/skylakex/cache.json
tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
tools/perf/pmu-events/arch/x86/skylakex/frontend.json
tools/perf/pmu-events/arch/x86/skylakex/memory.json
tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
tools/perf/scripts/python/export-to-postgresql.py
tools/perf/scripts/python/export-to-sqlite.py
tools/perf/scripts/python/exported-sql-viewer.py
tools/perf/tests/attr/test-record-C0
tools/perf/tests/attr/test-record-basic
tools/perf/tests/attr/test-record-branch-any
tools/perf/tests/attr/test-record-branch-filter-any
tools/perf/tests/attr/test-record-branch-filter-any_call
tools/perf/tests/attr/test-record-branch-filter-any_ret
tools/perf/tests/attr/test-record-branch-filter-hv
tools/perf/tests/attr/test-record-branch-filter-ind_call
tools/perf/tests/attr/test-record-branch-filter-k
tools/perf/tests/attr/test-record-branch-filter-u
tools/perf/tests/attr/test-record-count
tools/perf/tests/attr/test-record-data
tools/perf/tests/attr/test-record-freq
tools/perf/tests/attr/test-record-graph-default
tools/perf/tests/attr/test-record-graph-dwarf
tools/perf/tests/attr/test-record-graph-fp
tools/perf/tests/attr/test-record-group
tools/perf/tests/attr/test-record-group-sampling
tools/perf/tests/attr/test-record-group1
tools/perf/tests/attr/test-record-no-buffering
tools/perf/tests/attr/test-record-no-inherit
tools/perf/tests/attr/test-record-no-samples
tools/perf/tests/attr/test-record-period
tools/perf/tests/attr/test-record-raw
tools/perf/tests/backward-ring-buffer.c
tools/perf/tests/evsel-tp-sched.c
tools/perf/tests/expr.c
tools/perf/tests/openat-syscall-all-cpus.c
tools/perf/trace/beauty/mmap_flags.sh
tools/perf/trace/beauty/renameat.c
tools/perf/trace/strace/groups/string [new file with mode: 0644]
tools/perf/ui/browser.c
tools/perf/ui/browsers/Build
tools/perf/ui/browsers/annotate.c
tools/perf/ui/browsers/hists.c
tools/perf/ui/browsers/res_sample.c [new file with mode: 0644]
tools/perf/ui/browsers/scripts.c
tools/perf/util/annotate.c
tools/perf/util/annotate.h
tools/perf/util/archinsn.h [new file with mode: 0644]
tools/perf/util/bpf-event.c
tools/perf/util/bpf-event.h
tools/perf/util/build-id.c
tools/perf/util/cloexec.c
tools/perf/util/config.c
tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
tools/perf/util/cs-etm.c
tools/perf/util/data-convert-bt.c
tools/perf/util/data.c
tools/perf/util/data.h
tools/perf/util/dso.c
tools/perf/util/dso.h
tools/perf/util/env.c
tools/perf/util/env.h
tools/perf/util/event.h
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/header.c
tools/perf/util/header.h
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
tools/perf/util/machine.c
tools/perf/util/map.c
tools/perf/util/map.h
tools/perf/util/mmap.c
tools/perf/util/mmap.h
tools/perf/util/ordered-events.c
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/parse-events.l
tools/perf/util/parse-events.y
tools/perf/util/pmu.c
tools/perf/util/probe-event.c
tools/perf/util/python.c
tools/perf/util/scripting-engines/trace-event-perl.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/session.c
tools/perf/util/sort.c
tools/perf/util/sort.h
tools/perf/util/stat-display.c
tools/perf/util/stat.c
tools/perf/util/symbol.c
tools/perf/util/symbol_conf.h
tools/perf/util/time-utils.c
tools/perf/util/time-utils.h
tools/perf/util/trace-event-parse.c
tools/perf/util/trace-event-read.c
tools/perf/util/trace-event.c
tools/power/x86/turbostat/turbostat.c
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/bpf/bpf_helpers.h
tools/testing/selftests/bpf/prog_tests/flow_dissector.c
tools/testing/selftests/bpf/prog_tests/map_lock.c
tools/testing/selftests/bpf/prog_tests/spinlock.c
tools/testing/selftests/bpf/progs/bpf_flow.c
tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
tools/testing/selftests/bpf/test_btf.c
tools/testing/selftests/bpf/test_sock_fields.c
tools/testing/selftests/bpf/verifier/calls.c
tools/testing/selftests/bpf/verifier/direct_packet_access.c
tools/testing/selftests/bpf/verifier/ref_tracking.c
tools/testing/selftests/bpf/verifier/sock.c
tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/dirty_log_test.c
tools/testing/selftests/kvm/include/kvm_util.h
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
tools/testing/selftests/kvm/x86_64/evmcs_test.c
tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
tools/testing/selftests/kvm/x86_64/smm_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/state_test.c
tools/testing/selftests/net/fib_rule_tests.sh
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/run_afpackettests
tools/testing/selftests/net/run_netsocktests
tools/testing/selftests/netfilter/Makefile
tools/testing/selftests/netfilter/conntrack_icmp_related.sh [new file with mode: 0755]
tools/testing/selftests/netfilter/nft_nat.sh
tools/testing/selftests/proc/proc-pid-vm.c
tools/testing/selftests/proc/proc-self-map-files-002.c
tools/testing/selftests/rcutorture/bin/configNR_CPUS.sh
tools/testing/selftests/rcutorture/bin/config_override.sh
tools/testing/selftests/rcutorture/bin/configcheck.sh
tools/testing/selftests/rcutorture/bin/configinit.sh
tools/testing/selftests/rcutorture/bin/cpus2use.sh
tools/testing/selftests/rcutorture/bin/functions.sh
tools/testing/selftests/rcutorture/bin/jitter.sh
tools/testing/selftests/rcutorture/bin/kvm-build.sh
tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
tools/testing/selftests/rcutorture/bin/kvm.sh
tools/testing/selftests/rcutorture/bin/mkinitrd.sh
tools/testing/selftests/rcutorture/bin/parse-build.sh
tools/testing/selftests/rcutorture/bin/parse-console.sh
tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
tools/testing/selftests/seccomp/seccomp_bpf.c
tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
tools/testing/selftests/tc-testing/tc-tests/actions/nat.json
tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json [new file with mode: 0644]
tools/testing/selftests/tc-testing/tc-tests/actions/police.json
tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
tools/testing/selftests/tc-testing/tc-tests/actions/simple.json
tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
tools/testing/selftests/tpm2/tpm2.py
tools/testing/selftests/tpm2/tpm2_tests.py
virt/kvm/arm/arch_timer.c
virt/kvm/arm/arm.c
virt/kvm/arm/hyp/vgic-v3-sr.c
virt/kvm/arm/mmu.c
virt/kvm/arm/vgic/vgic-its.c
virt/kvm/arm/vgic/vgic-mmio-v3.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/arm/vgic/vgic.c
virt/kvm/arm/vgic/vgic.h
virt/kvm/eventfd.c
virt/kvm/irqchip.c
virt/kvm/kvm_main.c

index f49620f506f17a95bda75dd4cbfd2a544ee0a8b4..f3923a1f98583bef70d7beeac5954da858079a3c 100644 (file)
@@ -78,6 +78,8 @@ ForEachMacros:
   - 'ata_qc_for_each_with_internal'
   - 'ax25_for_each'
   - 'ax25_uid_for_each'
+  - '__bio_for_each_bvec'
+  - 'bio_for_each_bvec'
   - 'bio_for_each_integrity_vec'
   - '__bio_for_each_segment'
   - 'bio_for_each_segment'
@@ -118,10 +120,12 @@ ForEachMacros:
   - 'drm_for_each_legacy_plane'
   - 'drm_for_each_plane'
   - 'drm_for_each_plane_mask'
+  - 'drm_for_each_privobj'
   - 'drm_mm_for_each_hole'
   - 'drm_mm_for_each_node'
   - 'drm_mm_for_each_node_in_range'
   - 'drm_mm_for_each_node_safe'
+  - 'flow_action_for_each'
   - 'for_each_active_drhd_unit'
   - 'for_each_active_iommu'
   - 'for_each_available_child_of_node'
@@ -158,6 +162,9 @@ ForEachMacros:
   - 'for_each_dss_dev'
   - 'for_each_efi_memory_desc'
   - 'for_each_efi_memory_desc_in_map'
+  - 'for_each_element'
+  - 'for_each_element_extid'
+  - 'for_each_element_id'
   - 'for_each_endpoint_of_node'
   - 'for_each_evictable_lru'
   - 'for_each_fib6_node_rt_rcu'
@@ -195,6 +202,7 @@ ForEachMacros:
   - 'for_each_net_rcu'
   - 'for_each_new_connector_in_state'
   - 'for_each_new_crtc_in_state'
+  - 'for_each_new_mst_mgr_in_state'
   - 'for_each_new_plane_in_state'
   - 'for_each_new_private_obj_in_state'
   - 'for_each_node'
@@ -210,8 +218,10 @@ ForEachMacros:
   - 'for_each_of_pci_range'
   - 'for_each_old_connector_in_state'
   - 'for_each_old_crtc_in_state'
+  - 'for_each_old_mst_mgr_in_state'
   - 'for_each_oldnew_connector_in_state'
   - 'for_each_oldnew_crtc_in_state'
+  - 'for_each_oldnew_mst_mgr_in_state'
   - 'for_each_oldnew_plane_in_state'
   - 'for_each_oldnew_plane_in_state_reverse'
   - 'for_each_oldnew_private_obj_in_state'
@@ -243,6 +253,9 @@ ForEachMacros:
   - 'for_each_sg_dma_page'
   - 'for_each_sg_page'
   - 'for_each_sibling_event'
+  - 'for_each_subelement'
+  - 'for_each_subelement_extid'
+  - 'for_each_subelement_id'
   - '__for_each_thread'
   - 'for_each_thread'
   - 'for_each_zone'
@@ -252,6 +265,8 @@ ForEachMacros:
   - 'fwnode_for_each_child_node'
   - 'fwnode_graph_for_each_endpoint'
   - 'gadget_for_each_ep'
+  - 'genradix_for_each'
+  - 'genradix_for_each_from'
   - 'hash_for_each'
   - 'hash_for_each_possible'
   - 'hash_for_each_possible_rcu'
@@ -293,7 +308,11 @@ ForEachMacros:
   - 'key_for_each'
   - 'key_for_each_safe'
   - 'klp_for_each_func'
+  - 'klp_for_each_func_safe'
+  - 'klp_for_each_func_static'
   - 'klp_for_each_object'
+  - 'klp_for_each_object_safe'
+  - 'klp_for_each_object_static'
   - 'kvm_for_each_memslot'
   - 'kvm_for_each_vcpu'
   - 'list_for_each'
@@ -324,6 +343,8 @@ ForEachMacros:
   - 'media_device_for_each_intf'
   - 'media_device_for_each_link'
   - 'media_device_for_each_pad'
+  - 'mp_bvec_for_each_page'
+  - 'mp_bvec_for_each_segment'
   - 'nanddev_io_for_each_page'
   - 'netdev_for_each_lower_dev'
   - 'netdev_for_each_lower_private'
@@ -375,6 +396,7 @@ ForEachMacros:
   - 'rht_for_each_rcu'
   - 'rht_for_each_rcu_continue'
   - '__rq_for_each_bio'
+  - 'rq_for_each_bvec'
   - 'rq_for_each_segment'
   - 'scsi_for_each_prot_sg'
   - 'scsi_for_each_sg'
@@ -410,6 +432,8 @@ ForEachMacros:
   - 'v4l2_m2m_for_each_src_buf_safe'
   - 'virtio_device_for_each_vq'
   - 'xa_for_each'
+  - 'xa_for_each_marked'
+  - 'xa_for_each_start'
   - 'xas_for_each'
   - 'xas_for_each_conflict'
   - 'xas_for_each_marked'
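
The ForEachMacros additions above matter because clang-format only lays a
macro invocation out as a loop when the macro is listed there; otherwise it
treats the call as an ordinary expression and mangles the brace placement.
A minimal sketch of the effect (bv, bio, and iter follow the usual bvec
naming; process_bvec() is a hypothetical helper):

	/* With 'bio_for_each_bvec' listed in ForEachMacros, clang-format
	 * preserves the loop-style layout below rather than reflowing it
	 * as a function call:
	 */
	bio_for_each_bvec(bv, bio, iter) {
		process_bvec(&bv);	/* hypothetical per-segment work */
	}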
index 37e1847c798869a62a797840835341c6af48bbb7..ae2bcad06f4b58eb3db18020df75cabeb59b6ed0 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -156,6 +156,8 @@ Morten Welinder <welinder@darter.rentec.com>
 Morten Welinder <welinder@troll.com>
 Mythri P K <mythripk@ti.com>
 Nguyen Anh Quynh <aquynh@gmail.com>
+Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
+Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 Patrick Mochel <mochel@digitalimplant.org>
 Paul Burton <paul.burton@mips.com> <paul.burton@imgtec.com>
@@ -224,3 +226,5 @@ Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
 Gustavo Padovan <padovan@profusion.mobi>
+Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
+Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
index 9605dbd4b5b59ecc251913b1b327e6031cfb875f..5eea46fefcb2a95442f0fd77ce1b3bee07541821 100644 (file)
@@ -511,10 +511,12 @@ Description:      Control Symetric Multi Threading (SMT)
                control: Read/write interface to control SMT. Possible
                         values:
 
-                        "on"           SMT is enabled
-                        "off"          SMT is disabled
-                        "forceoff"     SMT is force disabled. Cannot be changed.
-                        "notsupported" SMT is not supported by the CPU
+                        "on"             SMT is enabled
+                        "off"            SMT is disabled
+                        "forceoff"       SMT is force disabled. Cannot be changed.
+                        "notsupported"   SMT is not supported by the CPU
+                        "notimplemented" SMT runtime toggling is not
+                                         implemented for the architecture
 
                         If control status is "forceoff" or "notsupported" writes
                         are rejected.
index 18f1798075633eff2235fa7f072054612dd5637e..c30c1957c7e6b866878d49d879f202ca3945813f 100644 (file)
@@ -155,8 +155,7 @@ keeping lock contention under control at all tree levels regardless
 of the level of loading on the system.
 
 </p><p>RCU updaters wait for normal grace periods by registering
-RCU callbacks, either directly via <tt>call_rcu()</tt> and
-friends (namely <tt>call_rcu_bh()</tt> and <tt>call_rcu_sched()</tt>),
+RCU callbacks, either directly via <tt>call_rcu()</tt>
 or indirectly via <tt>synchronize_rcu()</tt> and friends.
 RCU callbacks are represented by <tt>rcu_head</tt> structures,
 which are queued on <tt>rcu_data</tt> structures while they are
index 19e7a5fb6b739ec19ae337a12e3e4973f1cb70bc..57300db4b5ff607c30563bed1e8104c55f8e4b8e 100644 (file)
@@ -56,6 +56,7 @@ sections.
 RCU-preempt Expedited Grace Periods</a></h2>
 
 <p>
+<tt>CONFIG_PREEMPT=y</tt> kernels implement RCU-preempt.
 The overall flow of the handling of a given CPU by an RCU-preempt
 expedited grace period is shown in the following diagram:
 
@@ -139,6 +140,7 @@ or offline, among other things.
 RCU-sched Expedited Grace Periods</a></h2>
 
 <p>
+<tt>CONFIG_PREEMPT=n</tt> kernels implement RCU-sched.
 The overall flow of the handling of a given CPU by an RCU-sched
 expedited grace period is shown in the following diagram:
 
@@ -146,7 +148,7 @@ expedited grace period is shown in the following diagram:
 
 <p>
 As with RCU-preempt, RCU-sched's
-<tt>synchronize_sched_expedited()</tt> ignores offline and
+<tt>synchronize_rcu_expedited()</tt> ignores offline and
 idle CPUs, again because they are in remotely detectable
 quiescent states.
 However, because the
index 8d21af02b1f0722f179efa56da0e2f3977d8e4d1..c64f8d26609fb64ae34dfdf551624984f38e4c0c 100644 (file)
@@ -34,12 +34,11 @@ Similarly, any code that happens before the beginning of a given RCU grace
 period is guaranteed to see the effects of all accesses following the end
 of that grace period that are within RCU read-side critical sections.
 
-<p>This guarantee is particularly pervasive for <tt>synchronize_sched()</tt>,
-for which RCU-sched read-side critical sections include any region
+<p>Note well that RCU-sched read-side critical sections include any region
 of code for which preemption is disabled.
 Given that each individual machine instruction can be thought of as
 an extremely small region of preemption-disabled code, one can think of
-<tt>synchronize_sched()</tt> as <tt>smp_mb()</tt> on steroids.
+<tt>synchronize_rcu()</tt> as <tt>smp_mb()</tt> on steroids.
 
 <p>RCU updaters use this guarantee by splitting their updates into
 two phases, one of which is executed before the grace period and
index 687777f83b2371d4bd24e5d7abff88d7cf90a5d8..881353fd5bff1cbc1f3dead8009f5b86cb65832a 100644 (file)
@@ -81,18 +81,19 @@ currently executing on some other CPU.  We therefore cannot free
 up any data structures used by the old NMI handler until execution
 of it completes on all other CPUs.
 
-One way to accomplish this is via synchronize_sched(), perhaps as
+One way to accomplish this is via synchronize_rcu(), perhaps as
 follows:
 
        unset_nmi_callback();
-       synchronize_sched();
+       synchronize_rcu();
        kfree(my_nmi_data);
 
-This works because synchronize_sched() blocks until all CPUs complete
-any preemption-disabled segments of code that they were executing.
-Since NMI handlers disable preemption, synchronize_sched() is guaranteed
+This works because (as of v4.20) synchronize_rcu() blocks until all
+CPUs complete any preemption-disabled segments of code that they were
+executing.
+Since NMI handlers disable preemption, synchronize_rcu() is guaranteed
 not to return until all ongoing NMI handlers exit.  It is therefore safe
-to free up the handler's data as soon as synchronize_sched() returns.
+to free up the handler's data as soon as synchronize_rcu() returns.
 
 Important note: for this to work, the architecture in question must
 invoke nmi_enter() and nmi_exit() on NMI entry and exit, respectively.
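
Spelled out as a function, the teardown pattern above looks roughly as
follows (a sketch; my_nmi_data and unset_nmi_callback() are the document's
own placeholders):

	static void *my_nmi_data;	/* state used by the old NMI handler */

	static void teardown_nmi_handler(void)
	{
		unset_nmi_callback();	/* no new NMI invocations after this */
		synchronize_rcu();	/* wait out handlers already running */
		kfree(my_nmi_data);	/* now safe: no handler can touch it */
	}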
index 90ec5341ee981a0a397710d2ffd1261eb2557896..53bde717017bb8cde0fbec21f0dd65db33db2f91 100644 (file)
@@ -86,10 +86,8 @@ even on a UP system.  So do not do it!  Even on a UP system, the RCU
 infrastructure -must- respect grace periods, and -must- invoke callbacks
 from a known environment in which no locks are held.
 
-It -is- safe for synchronize_sched() and synchronize_rcu_bh() to return
-immediately on an UP system.  It is also safe for synchronize_rcu()
-to return immediately on UP systems, except when running preemptable
-RCU.
+Note that it -is- safe for synchronize_rcu() to return immediately on
+UP systems, including !PREEMPT SMP builds running on UP systems.
 
 Quick Quiz #3: Why can't synchronize_rcu() return immediately on
        UP systems running preemptable RCU?
index 6f469864d9f59aa5a4d2456f01d55409d58d32c6..e98ff261a438bd4e0858a943fc469437599cc284 100644 (file)
@@ -182,16 +182,13 @@ over a rather long period of time, but improvements are always welcome!
                when publicizing a pointer to a structure that can
                be traversed by an RCU read-side critical section.
 
-5.     If call_rcu(), or a related primitive such as call_rcu_bh(),
-       call_rcu_sched(), or call_srcu() is used, the callback function
-       will be called from softirq context.  In particular, it cannot
-       block.
+5.     If call_rcu() or call_srcu() is used, the callback function will
+       be called from softirq context.  In particular, it cannot block.
 
-6.     Since synchronize_rcu() can block, it cannot be called from
-       any sort of irq context.  The same rule applies for
-       synchronize_rcu_bh(), synchronize_sched(), synchronize_srcu(),
-       synchronize_rcu_expedited(), synchronize_rcu_bh_expedited(),
-       synchronize_sched_expedite(), and synchronize_srcu_expedited().
+6.     Since synchronize_rcu() can block, it cannot be called
+       from any sort of irq context.  The same rule applies
+       for synchronize_srcu(), synchronize_rcu_expedited(), and
+       synchronize_srcu_expedited().
 
        The expedited forms of these primitives have the same semantics
        as the non-expedited forms, but expediting is both expensive and
@@ -212,20 +209,20 @@ over a rather long period of time, but improvements are always welcome!
        of the system, especially to real-time workloads running on
        the rest of the system.
 
-7.     If the updater uses call_rcu() or synchronize_rcu(), then the
-       corresponding readers must use rcu_read_lock() and
-       rcu_read_unlock().  If the updater uses call_rcu_bh() or
-       synchronize_rcu_bh(), then the corresponding readers must
-       use rcu_read_lock_bh() and rcu_read_unlock_bh().  If the
-       updater uses call_rcu_sched() or synchronize_sched(), then
-       the corresponding readers must disable preemption, possibly
-       by calling rcu_read_lock_sched() and rcu_read_unlock_sched().
-       If the updater uses synchronize_srcu() or call_srcu(), then
-       the corresponding readers must use srcu_read_lock() and
+7.     As of v4.20, a given kernel implements only one RCU flavor,
+       which is RCU-sched for PREEMPT=n and RCU-preempt for PREEMPT=y.
+       If the updater uses call_rcu() or synchronize_rcu(),
+       then the corresponding readers may use rcu_read_lock() and
+       rcu_read_unlock(), rcu_read_lock_bh() and rcu_read_unlock_bh(),
+       or any pair of primitives that disables and re-enables preemption,
+       for example, rcu_read_lock_sched() and rcu_read_unlock_sched().
+       If the updater uses synchronize_srcu() or call_srcu(),
+       then the corresponding readers must use srcu_read_lock() and
        srcu_read_unlock(), and with the same srcu_struct.  The rules for
        the expedited primitives are the same as for their non-expedited
        counterparts.  Mixing things up will result in confusion and
-       broken kernels.
+       broken kernels, and has even resulted in an exploitable security
+       issue.
 
        One exception to this rule: rcu_read_lock() and rcu_read_unlock()
        may be substituted for rcu_read_lock_bh() and rcu_read_unlock_bh()
@@ -288,8 +285,7 @@ over a rather long period of time, but improvements are always welcome!
        d.      Periodically invoke synchronize_rcu(), permitting a limited
                number of updates per grace period.
 
-       The same cautions apply to call_rcu_bh(), call_rcu_sched(),
-       call_srcu(), and kfree_rcu().
+       The same cautions apply to call_srcu() and kfree_rcu().
 
        Note that although these primitives do take action to avoid memory
        exhaustion when any given CPU has too many callbacks, a determined
@@ -322,7 +318,7 @@ over a rather long period of time, but improvements are always welcome!
 
 11.    Any lock acquired by an RCU callback must be acquired elsewhere
        with softirq disabled, e.g., via spin_lock_irqsave(),
-       spin_lock_bh(), etc.  Failing to disable irq on a given
+       spin_lock_bh(), etc.  Failing to disable softirq on a given
        acquisition of that lock will result in deadlock as soon as
        the RCU softirq handler happens to run your RCU callback while
        interrupting that acquisition's critical section.
@@ -335,13 +331,16 @@ over a rather long period of time, but improvements are always welcome!
        must use whatever locking or other synchronization is required
        to safely access and/or modify that data structure.
 
-       RCU callbacks are -usually- executed on the same CPU that executed
-       the corresponding call_rcu(), call_rcu_bh(), or call_rcu_sched(),
-       but are by -no- means guaranteed to be.  For example, if a given
-       CPU goes offline while having an RCU callback pending, then that
-       RCU callback will execute on some surviving CPU.  (If this was
-       not the case, a self-spawning RCU callback would prevent the
-       victim CPU from ever going offline.)
+       Do not assume that RCU callbacks will be executed on the same
+       CPU that executed the corresponding call_rcu() or call_srcu().
+       For example, if a given CPU goes offline while having an RCU
+       callback pending, then that RCU callback will execute on some
+       surviving CPU.  (If this was not the case, a self-spawning RCU
+       callback would prevent the victim CPU from ever going offline.)
+       Furthermore, CPUs designated by rcu_nocbs= might well -always-
+       have their RCU callbacks executed on some other CPUs; in fact,
+       for some real-time workloads, this is the whole point of using
+       the rcu_nocbs= kernel boot parameter.
 
 13.    Unlike other forms of RCU, it -is- permissible to block in an
        SRCU read-side critical section (demarked by srcu_read_lock()
@@ -381,11 +380,11 @@ over a rather long period of time, but improvements are always welcome!
 
        SRCU's expedited primitive (synchronize_srcu_expedited())
        never sends IPIs to other CPUs, so it is easier on
-       real-time workloads than is synchronize_rcu_expedited(),
-       synchronize_rcu_bh_expedited() or synchronize_sched_expedited().
+       real-time workloads than is synchronize_rcu_expedited().
 
-       Note that rcu_dereference() and rcu_assign_pointer() relate to
-       SRCU just as they do to other forms of RCU.
+       Note that rcu_assign_pointer() relates to SRCU just as it does to
+       other forms of RCU, but instead of rcu_dereference() you should
+       use srcu_dereference() in order to avoid lockdep splats.
 
 14.    The whole point of call_rcu(), synchronize_rcu(), and friends
        is to wait until all pre-existing readers have finished before
@@ -405,6 +404,9 @@ over a rather long period of time, but improvements are always welcome!
        read-side critical sections.  It is the responsibility of the
        RCU update-side primitives to deal with this.
 
+       For SRCU readers, you can use smp_mb__after_srcu_read_unlock()
+       immediately after an srcu_read_unlock() to get a full barrier.
+
 16.    Use CONFIG_PROVE_LOCKING, CONFIG_DEBUG_OBJECTS_RCU_HEAD, and the
        __rcu sparse checks to validate your RCU code.  These can help
        find problems as follows:
@@ -428,22 +430,19 @@ over a rather long period of time, but improvements are always welcome!
        These debugging aids can help you find problems that are
        otherwise extremely difficult to spot.
 
-17.    If you register a callback using call_rcu(), call_rcu_bh(),
-       call_rcu_sched(), or call_srcu(), and pass in a function defined
-       within a loadable module, then it in necessary to wait for
-       all pending callbacks to be invoked after the last invocation
-       and before unloading that module.  Note that it is absolutely
-       -not- sufficient to wait for a grace period!  The current (say)
-       synchronize_rcu() implementation waits only for all previous
-       callbacks registered on the CPU that synchronize_rcu() is running
-       on, but it is -not- guaranteed to wait for callbacks registered
-       on other CPUs.
+17.    If you register a callback using call_rcu() or call_srcu(), and
+       pass in a function defined within a loadable module, then it is
+       necessary to wait for all pending callbacks to be invoked after
+       the last invocation and before unloading that module.  Note that
+       it is absolutely -not- sufficient to wait for a grace period!
+       The current (say) synchronize_rcu() implementation is -not-
+       guaranteed to wait for callbacks registered on other CPUs, or
+       even on the current CPU if that CPU recently went offline and
+       came back online.
 
        You instead need to use one of the barrier functions:
 
        o       call_rcu() -> rcu_barrier()
-       o       call_rcu_bh() -> rcu_barrier()
-       o       call_rcu_sched() -> rcu_barrier()
        o       call_srcu() -> srcu_barrier()
 
        However, these barrier functions are absolutely -not- guaranteed
index 721b3e4265155354137e199bbaadd82b9e2e4221..c818cf65c5a9a0068d87f207cad1dccd86603b2c 100644 (file)
@@ -52,10 +52,10 @@ o   If I am running on a uniprocessor kernel, which can only do one
 o      How can I see where RCU is currently used in the Linux kernel?
 
        Search for "rcu_read_lock", "rcu_read_unlock", "call_rcu",
-       "rcu_read_lock_bh", "rcu_read_unlock_bh", "call_rcu_bh",
-       "srcu_read_lock", "srcu_read_unlock", "synchronize_rcu",
-       "synchronize_net", "synchronize_srcu", and the other RCU
-       primitives.  Or grab one of the cscope databases from:
+       "rcu_read_lock_bh", "rcu_read_unlock_bh", "srcu_read_lock",
+       "srcu_read_unlock", "synchronize_rcu", "synchronize_net",
+       "synchronize_srcu", and the other RCU primitives.  Or grab one
+       of the cscope databases from:
 
        http://www.rdrop.com/users/paulmck/RCU/linuxusage/rculocktab.html
 
index ab96227bad42663b749df40b17156bf915102fd7..bf699e8cfc75ca18fe2ee94472ee46e6d2a6a9c8 100644 (file)
@@ -351,3 +351,106 @@ garbage values.
 
 In short, rcu_dereference() is -not- optional when you are going to
 dereference the resulting pointer.
+
+
+WHICH MEMBER OF THE rcu_dereference() FAMILY SHOULD YOU USE?
+
+First, please avoid using rcu_dereference_raw() and also please avoid
+using rcu_dereference_check() and rcu_dereference_protected() with a
+second argument with a constant value of 1 (or true, for that matter).
+With that caution out of the way, here is some guidance for which
+member of the rcu_dereference() family to use in various situations:
+
+1.     If the access needs to be within an RCU read-side critical
+       section, use rcu_dereference().  With the new consolidated
+       RCU flavors, an RCU read-side critical section is entered
+       using rcu_read_lock(), anything that disables bottom halves,
+       anything that disables interrupts, or anything that disables
+       preemption.
+
+2.     If the access might be within an RCU read-side critical section
+       on the one hand, or protected by (say) my_lock on the other,
+       use rcu_dereference_check(), for example:
+
+               p1 = rcu_dereference_check(p->rcu_protected_pointer,
+                                          lockdep_is_held(&my_lock));
+
+
+3.     If the access might be within an RCU read-side critical section
+       on the one hand, or protected by either my_lock or your_lock on
+       the other, again use rcu_dereference_check(), for example:
+
+               p1 = rcu_dereference_check(p->rcu_protected_pointer,
+                                          lockdep_is_held(&my_lock) ||
+                                          lockdep_is_held(&your_lock));
+
+4.     If the access is on the update side, so that it is always protected
+       by my_lock, use rcu_dereference_protected():
+
+               p1 = rcu_dereference_protected(p->rcu_protected_pointer,
+                                              lockdep_is_held(&my_lock));
+
+       This can be extended to handle multiple locks as in #3 above,
+       and both can be extended to check other conditions as well.
+
+5.     If the protection is supplied by the caller, and is thus unknown
+       to this code, that is the rare case when rcu_dereference_raw()
+       is appropriate.  In addition, rcu_dereference_raw() might be
+       appropriate when the lockdep expression would be excessively
+       complex, except that a better approach in that case might be to
+       take a long hard look at your synchronization design.  Still,
+       there are data-locking cases where any one of a very large number
+       of locks or reference counters suffices to protect the pointer,
+       so rcu_dereference_raw() does have its place.
+
+       However, its place is probably quite a bit smaller than one
+       might expect given the number of uses in the current kernel.
+       Ditto for its synonym, rcu_dereference_check( ... , 1), and
+       its close relative, rcu_dereference_protected(... , 1).
+
+
+SPARSE CHECKING OF RCU-PROTECTED POINTERS
+
+The sparse static-analysis tool checks for direct access to RCU-protected
+pointers, which can result in "interesting" bugs due to compiler
+optimizations involving invented loads and perhaps also load tearing.
+For example, suppose someone mistakenly does something like this:
+
+       p = q->rcu_protected_pointer;
+       do_something_with(p->a);
+       do_something_else_with(p->b);
+
+If register pressure is high, the compiler might optimize "p" out
+of existence, transforming the code to something like this:
+
+       do_something_with(q->rcu_protected_pointer->a);
+       do_something_else_with(q->rcu_protected_pointer->b);
+
+This could fatally disappoint your code if q->rcu_protected_pointer
+changed in the meantime.  Nor is this a theoretical problem:  Exactly
+this sort of bug cost Paul E. McKenney (and several of his innocent
+colleagues) a three-day weekend back in the early 1990s.
+
+Load tearing could of course result in dereferencing a mashup of a pair
+of pointers, which also might fatally disappoint your code.
+
+These problems could have been avoided simply by making the code instead
+read as follows:
+
+       p = rcu_dereference(q->rcu_protected_pointer);
+       do_something_with(p->a);
+       do_something_else_with(p->b);
+
+Unfortunately, these sorts of bugs can be extremely hard to spot during
+review.  This is where the sparse tool comes into play, along with the
+"__rcu" marker.  If you mark a pointer declaration, whether in a structure
+or as a formal parameter, with "__rcu", sparse will complain if
+this pointer is accessed directly.  It will also cause sparse to complain
+if a pointer not marked with "__rcu" is accessed using rcu_dereference()
+and friends.  For example, ->rcu_protected_pointer might be declared as
+follows:
+
+       struct foo __rcu *rcu_protected_pointer;
+
+Use of "__rcu" is opt-in.  If you choose not to use it, then you should
+ignore the sparse warnings.
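
Putting the sparse checking together with rcu_dereference(), a minimal
sketch might look as follows (struct foo, reader(), and the
do_something_with() helpers are illustrative, not taken from any
particular subsystem):

	struct foo {
		int a;
		int b;
	};

	static struct foo __rcu *rcu_protected_pointer;	/* sparse-checked */

	static void reader(void)
	{
		struct foo *p;

		rcu_read_lock();
		p = rcu_dereference(rcu_protected_pointer);	/* single load */
		if (p) {
			do_something_with(p->a);
			do_something_else_with(p->b);
		}
		rcu_read_unlock();
	}

Accessing rcu_protected_pointer directly in reader() would now draw a
sparse complaint, catching exactly the class of bug described above.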
index 5d7759071a3edbb3ef818e0d41a7081f39dc54ea..a2782df697328e3293769b429b5321c82e0e0b16 100644 (file)
@@ -83,16 +83,15 @@ Pseudo-code using rcu_barrier() is as follows:
    2. Execute rcu_barrier().
    3. Allow the module to be unloaded.
 
-There are also rcu_barrier_bh(), rcu_barrier_sched(), and srcu_barrier()
-functions for the other flavors of RCU, and you of course must match
-the flavor of rcu_barrier() with that of call_rcu().  If your module
-uses multiple flavors of call_rcu(), then it must also use multiple
+There is also an srcu_barrier() function for SRCU, and you of course
+must match the flavor of rcu_barrier() with that of call_rcu().  If your
+module uses multiple flavors of call_rcu(), then it must also use multiple
 flavors of rcu_barrier() when unloading that module.  For example, if
-it uses call_rcu_bh(), call_srcu() on srcu_struct_1, and call_srcu() on
+it uses call_rcu(), call_srcu() on srcu_struct_1, and call_srcu() on
 srcu_struct_2(), then the following three lines of code will be required
 when unloading:
 
- 1 rcu_barrier_bh();
+ 1 rcu_barrier();
  2 srcu_barrier(&srcu_struct_1);
  3 srcu_barrier(&srcu_struct_2);
 
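As a sketch, that unload sequence might sit in a module exit handler like
this (my_exit() is hypothetical; srcu_struct_1 and srcu_struct_2 follow the
text's placeholders, and step 1, stopping new callback posting, happens
elsewhere):

	static void __exit my_exit(void)
	{
		rcu_barrier();			/* flush call_rcu() callbacks */
		srcu_barrier(&srcu_struct_1);	/* flush call_srcu() callbacks */
		srcu_barrier(&srcu_struct_2);	/* ...for each srcu_struct used */
	}
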
@@ -185,12 +184,12 @@ module invokes call_rcu() from timers, you will need to first cancel all
 the timers, and only then invoke rcu_barrier() to wait for any remaining
 RCU callbacks to complete.
 
-Of course, if you module uses call_rcu_bh(), you will need to invoke
-rcu_barrier_bh() before unloading.  Similarly, if your module uses
-call_rcu_sched(), you will need to invoke rcu_barrier_sched() before
-unloading.  If your module uses call_rcu(), call_rcu_bh(), -and-
-call_rcu_sched(), then you will need to invoke each of rcu_barrier(),
-rcu_barrier_bh(), and rcu_barrier_sched().
+Of course, if your module uses call_rcu(), you will need to invoke
+rcu_barrier() before unloading.  Similarly, if your module uses
+call_srcu(), you will need to invoke srcu_barrier() before unloading,
+and on the same srcu_struct structure.  If your module uses call_rcu()
+-and- call_srcu(), then you will need to invoke rcu_barrier() -and-
+srcu_barrier().
 
 
 Implementing rcu_barrier()
@@ -223,8 +222,8 @@ shown below. Note that the final "1" in on_each_cpu()'s argument list
 ensures that all the calls to rcu_barrier_func() will have completed
 before on_each_cpu() returns. Line 9 then waits for the completion.
 
-This code was rewritten in 2008 to support rcu_barrier_bh() and
-rcu_barrier_sched() in addition to the original rcu_barrier().
+This code was rewritten in 2008 and several times thereafter, but this
+still gives the general idea.
 
 The rcu_barrier_func() runs on each CPU, where it invokes call_rcu()
 to post an RCU callback, as follows:
index 1ace20815bb1c97290f12fa317ec65b97b3220b4..981651a8b65d206bb073f40c982d3fc5314c4d70 100644 (file)
@@ -310,7 +310,7 @@ reader, updater, and reclaimer.
 
 
            rcu_assign_pointer()
-                                   +--------+
+                                   +--------+
            +---------------------->| reader |---------+
            |                       +--------+         |
            |                           |              |
@@ -318,12 +318,12 @@ reader, updater, and reclaimer.
            |                           |              | rcu_read_lock()
            |                           |              | rcu_read_unlock()
            |        rcu_dereference()  |              |
-       +---------+                      |              |
-       | updater |<---------------------+              |
-       +---------+                                     V
+           +---------+                 |              |
+           | updater |<----------------+              |
+           +---------+                                V
            |                                    +-----------+
            +----------------------------------->| reclaimer |
-                                                +-----------+
+                                                +-----------+
              Defer:
              synchronize_rcu() & call_rcu()
 
index b8ca28b60215a48f1ee99cfae36f20e0b8d0e8da..7e71c9c1d8e9c7eee70ef957610b483a58739232 100644 (file)
@@ -56,12 +56,12 @@ situation from a state where some tasks are stalled but the CPU is
 still doing productive work. As such, time spent in this subset of the
 stall state is tracked separately and exported in the "full" averages.
 
-The ratios are tracked as recent trends over ten, sixty, and three
-hundred second windows, which gives insight into short term events as
-well as medium and long term trends. The total absolute stall time is
-tracked and exported as well, to allow detection of latency spikes
-which wouldn't necessarily make a dent in the time averages, or to
-average trends over custom time frames.
+The ratios (in %) are tracked as recent trends over ten, sixty, and
+three hundred second windows, which gives insight into short term events
+as well as medium and long term trends. The total absolute stall time
+(in us) is tracked and exported as well, to allow detection of latency
+spikes which wouldn't necessarily make a dent in the time averages,
+or to average trends over custom time frames.
 
 Cgroup2 interface
 =================
index 2b8ee90bb64470d0d6d6ccadccf8b8fbbf86509d..b7e23e9d17704fb6c56c5147b32cbc04ee95711b 100644 (file)
                        in the "bleeding edge" mini2440 support kernel at
                        http://repo.or.cz/w/linux-2.6/mini2440.git
 
+       mitigations=
+                       [X86,PPC,S390] Control optional mitigations for CPU
+                       vulnerabilities.  This is a set of curated,
+                       arch-independent options, each of which is an
+                       aggregation of existing arch-specific options.
+
+                       off
+                               Disable all optional CPU mitigations.  This
+                               improves system performance, but it may also
+                               expose users to several CPU vulnerabilities.
+                               Equivalent to: nopti [X86,PPC]
+                                              nospectre_v1 [PPC]
+                                              nobp=0 [S390]
+                                              nospectre_v2 [X86,PPC,S390]
+                                              spectre_v2_user=off [X86]
+                                              spec_store_bypass_disable=off [X86,PPC]
+                                              l1tf=off [X86]
+
+                       auto (default)
+                               Mitigate all CPU vulnerabilities, but leave SMT
+                               enabled, even if it's vulnerable.  This is for
+                               users who don't want to be surprised by SMT
+                               getting disabled across kernel upgrades, or who
+                               have other ways of avoiding SMT-based attacks.
+                               Equivalent to: (default behavior)
+
+                       auto,nosmt
+                               Mitigate all CPU vulnerabilities, disabling SMT
+                               if needed.  This is for users who always want to
+                               be fully mitigated, even if it means losing SMT.
+                               Equivalent to: l1tf=flush,nosmt [X86]
+
        mminit_loglevel=
                        [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
                        parameter allows control of the logging verbosity for
                                see CONFIG_RAS_CEC help text.
 
        rcu_nocbs=      [KNL]
-                       The argument is a cpu list, as described above.
+                       The argument is a cpu list, as described above,
+                       except that the string "all" can be used to
+                       specify every CPU on the system.
 
                        In kernels built with CONFIG_RCU_NOCB_CPU=y, set
                        the specified list of CPUs to be no-callback CPUs.
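
			As an illustration only, a boot line combining the
			two parameters documented above might read:

				mitigations=auto,nosmt rcu_nocbs=all

			which keeps all mitigations enabled (sacrificing SMT
			if needed) and offloads RCU callbacks from every CPU.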
index 913396ac582431cb3acbdc96a1bd7f4293661d0b..dca3fb0554db4928fa186e0826448d37e543a550 100644 (file)
@@ -56,6 +56,23 @@ Barriers:
   smp_mb__{before,after}_atomic()
 
 
+TYPES (signed vs unsigned)
+--------------------------
+
+While atomic_t, atomic_long_t and atomic64_t use int, long and s64
+respectively (for hysterical raisins), the kernel uses -fno-strict-overflow
+(which implies -fwrapv) and defines signed overflow to behave like
+2s-complement.
+
+Therefore, an explicitly unsigned variant of the atomic ops is strictly
+unnecessary and we can simply cast; there is no UB.
+
+There was a bug in UBSAN prior to GCC-8 that would generate UB warnings for
+signed types.
+
+With this we also conform to the C/C++ _Atomic behaviour and things like
+P1236R1.
+
 
 SEMANTICS
 ---------
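
Returning to the TYPES hunk above, a minimal sketch of the casting idiom
(the names are illustrative):

	atomic_t v = ATOMIC_INIT(0);

	/* Signed overflow is well defined (2s-complement) under
	 * -fno-strict-overflow, so unsigned semantics are just a cast away:
	 */
	unsigned int u = (unsigned int)atomic_fetch_add(1, &v);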
index 9a60a5d60e380ab2204334086a58ebef1f3b11e5..7313d354f20e6402e23161f24ddab227afb7c0a1 100644 (file)
@@ -148,16 +148,16 @@ The ``btf_type.size * 8`` must be equal to or greater than ``BTF_INT_BITS()``
 for the type. The maximum value of ``BTF_INT_BITS()`` is 128.
 
 The ``BTF_INT_OFFSET()`` specifies the starting bit offset to calculate values
-for this int. For example, a bitfield struct member has: * btf member bit
-offset 100 from the start of the structure, * btf member pointing to an int
-type, * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
+for this int. For example, a bitfield struct member has:
+ * btf member bit offset 100 from the start of the structure,
+ * btf member pointing to an int type,
+ * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
 
 Then in the struct memory layout, this member will occupy ``4`` bits starting
 from bits ``100 + 2 = 102``.
 
 Alternatively, the bitfield struct member can be the following to access the
 same bits as the above:
-
  * btf member bit offset 102,
  * btf member pointing to an int type,
  * the int type has ``BTF_INT_OFFSET() = 0`` and ``BTF_INT_BITS() = 4``
index 6eb9d3f090cdf5d9a82afa3bd46cec5554ca116a..93cb65d52720a0ef72b2ba527ef3135b579c113e 100644 (file)
@@ -101,16 +101,6 @@ changes occur:
        translations for software managed TLB configurations.
        The sparc64 port currently does this.
 
-6) ``void tlb_migrate_finish(struct mm_struct *mm)``
-
-       This interface is called at the end of an explicit
-       process migration. This interface provides a hook
-       to allow a platform to update TLB or context-specific
-       information for the address space.
-
-       The ia64 sn2 platform is one example of a platform
-       that uses this interface.
-
 Next, we have the cache flushing interfaces.  In general, when Linux
 is changing an existing virtual-->physical mapping to a new value,
 the sequence will be in one of the following forms::
index 365dcf384d73922a22bc70a32e98444122bc4fa7..82dd7582e945461efbdff77c8222bdc1e0e162b9 100644 (file)
@@ -228,7 +228,7 @@ patternProperties:
                 - renesas,r9a06g032-smp
                 - rockchip,rk3036-smp
                 - rockchip,rk3066-smp
-               - socionext,milbeaut-m10v-smp
+                - socionext,milbeaut-m10v-smp
                 - ste,dbx500-smp
 
       cpu-release-addr:
index 08bab0e94d25a21b8ed4d87738641e5ffe74bfa2..d0ae46d7bac370d9db369d9e471ef632be2482d9 100644 (file)
@@ -26,7 +26,7 @@ Required node properties:
 
 Optional node properties:
 
- - ti,mode:     Operation mode (see above).
+ - ti,mode:     Operation mode (u8) (see above).
 
 
 Example (operation mode 2):
@@ -34,5 +34,5 @@ Example (operation mode 2):
        adc128d818@1d {
                compatible = "ti,adc128d818";
                reg = <0x1d>;
-               ti,mode = <2>;
+               ti,mode = /bits/ 8 <2>;
        };
index 8de96a4fb2d574095cb088744f405a1bab5e87f2..f977ea7617f68235f19d801d855e54bbd6f25b02 100644 (file)
@@ -16,6 +16,7 @@ Required properties:
     - "renesas,irqc-r8a7793" (R-Car M2-N)
     - "renesas,irqc-r8a7794" (R-Car E2)
     - "renesas,intc-ex-r8a774a1" (RZ/G2M)
+    - "renesas,intc-ex-r8a774c0" (RZ/G2E)
     - "renesas,intc-ex-r8a7795" (R-Car H3)
     - "renesas,intc-ex-r8a7796" (R-Car M3-W)
     - "renesas,intc-ex-r8a77965" (R-Car M3-N)
index 24c5cdaba8d279a4b132fbd2f964ae1460b3fd0f..ca83dcc84fb8ee5cfd876cf0bb3d8af5fd85ba6b 100644 (file)
@@ -20,6 +20,8 @@ Required properties:
 Optional properties:
 - phy-handle: See ethernet.txt file in the same directory.
               If absent, davinci_emac driver defaults to 100/FULL.
+- nvmem-cells: phandle, reference to an nvmem node for the MAC address
+- nvmem-cell-names: string, should be "mac-address" if nvmem is to be used
 - ti,davinci-rmii-en: 1 byte, 1 means use RMII
 - ti,davinci-no-bd-ram: boolean, does EMAC have BD RAM?
 
index bbcb255c3150230978fba796b320a71c206ddbad..93a7469e70d4131fbc2d7f2daffd1917020709ee 100644 (file)
@@ -12,10 +12,15 @@ Required properties:
 Subnodes:
 
 The integrated switch subnode should be specified according to the binding
-described in dsa/dsa.txt. As the QCA8K switches do not have a N:N mapping of
-port and PHY id, each subnode describing a port needs to have a valid phandle
-referencing the internal PHY connected to it. The CPU port of this switch is
-always port 0.
+described in dsa/dsa.txt. If the QCA8K switch is connected to a SoC's external
+mdio-bus, each subnode describing a port needs to have a valid phandle
+referencing the internal PHY it is connected to. This is because there's no
+N:N mapping of port and PHY id.
+
+Don't use mixed external and internal mdio-bus configurations, as this is
+not supported by the hardware.
+
+The CPU port of this switch is always port 0.
 
 A CPU port node has the following optional node:
 
@@ -31,8 +36,9 @@ For QCA8K the 'fixed-link' sub-node supports only the following properties:
 - 'full-duplex' (boolean, optional), to indicate that full duplex is
   used. When absent, half duplex is assumed.
 
-Example:
+Examples:
 
+for the external mdio-bus configuration:
 
        &mdio0 {
                phy_port1: phy@0 {
@@ -55,12 +61,12 @@ Example:
                        reg = <4>;
                };
 
-               switch0@0 {
+               switch@10 {
                        compatible = "qca,qca8337";
                        #address-cells = <1>;
                        #size-cells = <0>;
 
-                       reg = <0>;
+                       reg = <0x10>;
 
                        ports {
                                #address-cells = <1>;
@@ -108,3 +114,56 @@ Example:
                        };
                };
        };
+
+for the internal master mdio-bus configuration:
+
+       &mdio0 {
+               switch@10 {
+                       compatible = "qca,qca8337";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       reg = <0x10>;
+
+                       ports {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+
+                               port@0 {
+                                       reg = <0>;
+                                       label = "cpu";
+                                       ethernet = <&gmac1>;
+                                       phy-mode = "rgmii";
+                                       fixed-link {
+                                               speed = <1000>;
+                                               full-duplex;
+                                       };
+                               };
+
+                               port@1 {
+                                       reg = <1>;
+                                       label = "lan1";
+                               };
+
+                               port@2 {
+                                       reg = <2>;
+                                       label = "lan2";
+                               };
+
+                               port@3 {
+                                       reg = <3>;
+                                       label = "lan3";
+                               };
+
+                               port@4 {
+                                       reg = <4>;
+                                       label = "lan4";
+                               };
+
+                               port@5 {
+                                       reg = <5>;
+                                       label = "wan";
+                               };
+                       };
+               };
+       };
index cfc376bc977aa0a25e64d4e1ef617a1a326fe634..a6862158058461f5af428498ea14c98aed1f7775 100644 (file)
@@ -10,15 +10,14 @@ Documentation/devicetree/bindings/phy/phy-bindings.txt.
   the boot program; should be used in cases where the MAC address assigned to
   the device by the boot program is different from the "local-mac-address"
   property;
-- nvmem-cells: phandle, reference to an nvmem node for the MAC address;
-- nvmem-cell-names: string, should be "mac-address" if nvmem is to be used;
 - max-speed: number, specifies maximum speed in Mbit/s supported by the device;
 - max-frame-size: number, maximum transfer unit (IEEE defined MTU), rather than
   the maximum frame size (there's a contradiction in the Devicetree
   Specification).
 - phy-mode: string, operation mode of the PHY interface. This is now a de-facto
   standard property; supported values are:
-  * "internal"
+  * "internal" (Internal means there is not a standard bus between the MAC and
+     the PHY, something proprietary is being used to embed the PHY in the MAC.)
   * "mii"
   * "gmii"
   * "sgmii"
index 174f292d8a3e8c14cf7d5d1105380b5f3d358544..8b80515729d7145cc05c9293857212ba914e0607 100644 (file)
@@ -26,6 +26,10 @@ Required properties:
        Optional elements: 'tsu_clk'
 - clocks: Phandles to input clocks.
 
+Optional properties:
+- nvmem-cells: phandle, reference to an nvmem node for the MAC address
+- nvmem-cell-names: string, should be "mac-address" if nvmem is to be used
+
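+For example (a hedged sketch; the unit address and the nvmem cell label
+below are placeholders, not taken from this binding):
+
+	ethernet@fffc4000 {
+		compatible = "cdns,macb";
+		reg = <0xfffc4000 0x4000>;
+		nvmem-cells = <&macaddr>;
+		nvmem-cell-names = "mac-address";
+	};
+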
 Optional properties for PHY child node:
 - reset-gpios : Should specify the gpio for phy reset
 - magic-packet : If present, indicates that the hardware supports waking
index 742cb470595ba4d7e2a3e467328d33a8bf5335b5..bcfb13194f16364b0ac77a4391a26c0bb34206d0 100644 (file)
@@ -16,6 +16,7 @@ Required properties:
   * "mediatek,mt8127-uart" for MT8127 compatible UARTS
   * "mediatek,mt8135-uart" for MT8135 compatible UARTS
   * "mediatek,mt8173-uart" for MT8173 compatible UARTS
+  * "mediatek,mt8183-uart", "mediatek,mt6577-uart" for MT8183 compatible UARTS
   * "mediatek,mt6577-uart" for MT6577 and all of the above
 
 - reg: The base address of the UART register bank.
index 79beb807996b7a3a17e08b5f1d6e31d4176d3fc2..4a74cf6f2797274b96510685a44f4066fac558d3 100644 (file)
@@ -370,11 +370,15 @@ autosuspend the interface's device.  When the usage counter is = 0
 then the interface is considered to be idle, and the kernel may
 autosuspend the device.
 
-Drivers need not be concerned about balancing changes to the usage
-counter; the USB core will undo any remaining "get"s when a driver
-is unbound from its interface.  As a corollary, drivers must not call
-any of the ``usb_autopm_*`` functions after their ``disconnect``
-routine has returned.
+Drivers must be careful to balance their overall changes to the usage
+counter.  Unbalanced "get"s will remain in effect when a driver is
+unbound from its interface, preventing the device from going into
+runtime suspend should the interface be bound to a driver again.  On
+the other hand, drivers are allowed to achieve this balance by calling
+the ``usb_autopm_*`` functions even after their ``disconnect`` routine
+has returned -- say from within a work-queue routine -- provided they
+retain an active reference to the interface (via ``usb_get_intf`` and
+``usb_put_intf``).
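+
+A hedged sketch (the ``mydrv`` names here are made up, not from this
+document) of a work routine using this pattern might look like::
+
+	static void mydrv_pm_work(struct work_struct *work)
+	{
+		struct mydrv *md = container_of(work, struct mydrv, pm_work);
+
+		/*
+		 * Safe even after disconnect() has returned: we still hold
+		 * a reference to the interface taken with usb_get_intf().
+		 */
+		usb_autopm_put_interface(md->intf);
+		usb_put_intf(md->intf);
+	}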
 
 Drivers using the async routines are responsible for their own
 synchronization and mutual exclusion.
index 944d1965e917e9a91496637ab484d5197d68223d..00ff0cfccfa71cdce0d02ddd8608cf962ee99308 100644 (file)
@@ -12,11 +12,13 @@ CONTENTS
 
  (4) Filesystem context security.
 
- (5) VFS filesystem context operations.
+ (5) VFS filesystem context API.
 
- (6) Parameter description.
+ (6) Superblock creation helpers.
 
- (7) Parameter helper functions.
+ (7) Parameter description.
+
+ (8) Parameter helper functions.
 
 
 ========
@@ -41,12 +43,15 @@ The creation of new mounts is now to be done in a multistep process:
 
  (7) Destroy the context.
 
-To support this, the file_system_type struct gains a new field:
+To support this, the file_system_type struct gains two new fields:
 
        int (*init_fs_context)(struct fs_context *fc);
+       const struct fs_parameter_description *parameters;
 
-which is invoked to set up the filesystem-specific parts of a filesystem
-context, including the additional space.
+The first is invoked to set up the filesystem-specific parts of a filesystem
+context, including the additional space, and the second points to the
+parameter description for validation at registration time and querying by a
+future system call.
 
 Note that security initialisation is done *after* the filesystem is called so
 that the namespaces may be adjusted first.
@@ -73,9 +78,9 @@ context.  This is represented by the fs_context structure:
                void                    *s_fs_info;
                unsigned int            sb_flags;
                unsigned int            sb_flags_mask;
+               unsigned int            s_iflags;
+               unsigned int            lsm_flags;
                enum fs_context_purpose purpose:8;
-               bool                    sloppy:1;
-               bool                    silent:1;
                ...
        };
 
@@ -141,6 +146,10 @@ The fs_context fields are as follows:
 
      Which SB_* flag bits are to be set/cleared in super_block::s_flags.
 
+ (*) unsigned int s_iflags
+
+     These will be bitwise-OR'd with s->s_iflags when a superblock is created.
+
  (*) enum fs_context_purpose
 
      This indicates the purpose for which the context is intended.  The
@@ -150,17 +159,6 @@ The fs_context fields are as follows:
        FS_CONTEXT_FOR_SUBMOUNT         -- New automatic submount of extant mount
        FS_CONTEXT_FOR_RECONFIGURE      -- Change an existing mount
 
- (*) bool sloppy
- (*) bool silent
-
-     These are set if the sloppy or silent mount options are given.
-
-     [NOTE] sloppy is probably unnecessary when userspace passes over one
-     option at a time since the error can just be ignored if userspace deems it
-     to be unimportant.
-
-     [NOTE] silent is probably redundant with sb_flags & SB_SILENT.
-
 The mount context is created by calling vfs_new_fs_context() or
 vfs_dup_fs_context() and is destroyed with put_fs_context().  Note that the
 structure is not refcounted.
@@ -342,28 +340,47 @@ number of operations used by the new mount code for this purpose:
      It should return 0 on success or a negative error code on failure.
 
 
-=================================
-VFS FILESYSTEM CONTEXT OPERATIONS
-=================================
+==========================
+VFS FILESYSTEM CONTEXT API
+==========================
 
-There are four operations for creating a filesystem context and
-one for destroying a context:
+There are four operations for creating a filesystem context and one for
+destroying a context:
 
- (*) struct fs_context *vfs_new_fs_context(struct file_system_type *fs_type,
-                                          struct dentry *reference,
-                                          unsigned int sb_flags,
-                                          unsigned int sb_flags_mask,
-                                          enum fs_context_purpose purpose);
+ (*) struct fs_context *fs_context_for_mount(
+               struct file_system_type *fs_type,
+               unsigned int sb_flags);
 
-     Create a filesystem context for a given filesystem type and purpose.  This
-     allocates the filesystem context, sets the superblock flags, initialises
-     the security and calls fs_type->init_fs_context() to initialise the
-     filesystem private data.
+     Allocate a filesystem context for the purpose of setting up a new mount,
+     whether that be with a new superblock or sharing an existing one.  This
+     sets the superblock flags, initialises the security and calls
+     fs_type->init_fs_context() to initialise the filesystem private data.
 
-     reference can be NULL or it may indicate the root dentry of a superblock
-     that is going to be reconfigured (FS_CONTEXT_FOR_RECONFIGURE) or
-     the automount point that triggered a submount (FS_CONTEXT_FOR_SUBMOUNT).
-     This is provided as a source of namespace information.
+     fs_type specifies the filesystem type that will manage the context and
+     sb_flags presets the superblock flags stored therein.
+
+ (*) struct fs_context *fs_context_for_reconfigure(
+               struct dentry *dentry,
+               unsigned int sb_flags,
+               unsigned int sb_flags_mask);
+
+     Allocate a filesystem context for the purpose of reconfiguring an
+     existing superblock.  dentry provides a reference to the superblock to be
+     configured.  sb_flags and sb_flags_mask indicate which superblock flags
+     need changing and to what.
+
+ (*) struct fs_context *fs_context_for_submount(
+               struct file_system_type *fs_type,
+               struct dentry *reference);
+
+     Allocate a filesystem context for the purpose of creating a new mount for
+     an automount point or other derived superblock.  fs_type specifies the
+     filesystem type that will manage the context and the reference dentry
+     supplies the parameters.  Namespaces are also propagated from the
+     reference dentry's superblock.
+
+     Note that it's not a requirement that the reference dentry be of the same
+     filesystem type as fs_type.
 
  (*) struct fs_context *vfs_dup_fs_context(struct fs_context *src_fc);
 
@@ -390,20 +407,6 @@ context pointer or a negative error code.
 For the remaining operations, if an error occurs, a negative error code will be
 returned.
 
- (*) int vfs_get_tree(struct fs_context *fc);
-
-     Get or create the mountable root and superblock, using the parameters in
-     the filesystem context to select/configure the superblock.  This invokes
-     the ->validate() op and then the ->get_tree() op.
-
-     [NOTE] ->validate() could perhaps be rolled into ->get_tree() and
-     ->reconfigure().
-
- (*) struct vfsmount *vfs_create_mount(struct fs_context *fc);
-
-     Create a mount given the parameters in the specified filesystem context.
-     Note that this does not attach the mount to anything.
-
  (*) int vfs_parse_fs_param(struct fs_context *fc,
                            struct fs_parameter *param);
 
@@ -432,17 +435,80 @@ returned.
      clear the pointer, but then becomes responsible for disposing of the
      object.
 
- (*) int vfs_parse_fs_string(struct fs_context *fc, char *key,
+ (*) int vfs_parse_fs_string(struct fs_context *fc, const char *key,
                             const char *value, size_t v_size);
 
-     A wrapper around vfs_parse_fs_param() that just passes a constant string.
+     A wrapper around vfs_parse_fs_param() that copies the value string it is
+     passed.
 
  (*) int generic_parse_monolithic(struct fs_context *fc, void *data);
 
      Parse a sys_mount() data page, assuming the form to be a text list
      consisting of key[=val] options separated by commas.  Each item in the
      list is passed to vfs_mount_option().  This is the default when the
-     ->parse_monolithic() operation is NULL.
+     ->parse_monolithic() method is NULL.
+
+ (*) int vfs_get_tree(struct fs_context *fc);
+
+     Get or create the mountable root and superblock, using the parameters in
+     the filesystem context to select/configure the superblock.  This invokes
+     the ->get_tree() method.
+
+ (*) struct vfsmount *vfs_create_mount(struct fs_context *fc);
+
+     Create a mount given the parameters in the specified filesystem context.
+     Note that this does not attach the mount to anything.
+
+
+===========================
+SUPERBLOCK CREATION HELPERS
+===========================
+
+A number of VFS helpers are available for use by filesystems for the creation
+or looking up of superblocks.
+
+ (*) struct super_block *
+     sget_fc(struct fs_context *fc,
+            int (*test)(struct super_block *sb, struct fs_context *fc),
+            int (*set)(struct super_block *sb, struct fs_context *fc));
+
+     This is the core routine.  If test is non-NULL, it searches for an
+     existing superblock matching the criteria held in the fs_context, using
+     the test function to match them.  If no match is found, a new superblock
+     is created and the set function is called to set it up.
+
+     Prior to the set function being called, fc->s_fs_info will be transferred
+     to sb->s_fs_info - and fc->s_fs_info will be cleared if set returns
+     success (ie. 0).
+
+The following helpers all wrap sget_fc():
+
+ (*) int vfs_get_super(struct fs_context *fc,
+                      enum vfs_get_super_keying keying,
+                      int (*fill_super)(struct super_block *sb,
+                                        struct fs_context *fc))
+
+     This creates/looks up a deviceless superblock.  The keying indicates how
+     many superblocks of this type may exist and in what manner they may be
+     shared:
+
+       (1) vfs_get_single_super
+
+           Only one such superblock may exist in the system.  Any further
+           attempt to get a new superblock gets this one (and any parameter
+           differences are ignored).
+
+       (2) vfs_get_keyed_super
+
+           Multiple superblocks of this type may exist and they're keyed on
+           their s_fs_info pointer (for example this may refer to a
+           namespace).
+
+       (3) vfs_get_independent_super
+
+           Multiple independent superblocks of this type may exist.  This
+           function never matches an existing one and always creates a new
+           one.
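+
+     As a hedged sketch (a hypothetical filesystem; these names are
+     assumptions, not taken from this document), a deviceless filesystem's
+     ->get_tree() might simply wrap it:
+
+	static int foofs_get_tree(struct fs_context *fc)
+	{
+		/* Each mount gets its own, independent superblock. */
+		return vfs_get_super(fc, vfs_get_independent_super,
+				     foofs_fill_super);
+	}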
 
 
 =====================
@@ -454,35 +520,22 @@ There's a core description struct that links everything together:
 
        struct fs_parameter_description {
                const char      name[16];
-               u8              nr_params;
-               u8              nr_alt_keys;
-               u8              nr_enums;
-               bool            ignore_unknown;
-               bool            no_source;
-               const char *const *keys;
-               const struct constant_table *alt_keys;
                const struct fs_parameter_spec *specs;
                const struct fs_parameter_enum *enums;
        };
 
 For example:
 
-       enum afs_param {
+       enum {
                Opt_autocell,
                Opt_bar,
                Opt_dyn,
                Opt_foo,
                Opt_source,
-               nr__afs_params
        };
 
        static const struct fs_parameter_description afs_fs_parameters = {
                .name           = "kAFS",
-               .nr_params      = nr__afs_params,
-               .nr_alt_keys    = ARRAY_SIZE(afs_param_alt_keys),
-               .nr_enums       = ARRAY_SIZE(afs_param_enums),
-               .keys           = afs_param_keys,
-               .alt_keys       = afs_param_alt_keys,
                .specs          = afs_param_specs,
                .enums          = afs_param_enums,
        };
@@ -494,28 +547,24 @@ The members are as follows:
      The name to be used in error messages generated by the parse helper
      functions.
 
- (2) u8 nr_params;
-
-     The number of discrete parameter identifiers.  This indicates the number
-     of elements in the ->types[] array and also limits the values that may be
-     used in the values that the ->keys[] array maps to.
-
-     It is expected that, for example, two parameters that are related, say
-     "acl" and "noacl" with have the same ID, but will be flagged to indicate
-     that one is the inverse of the other.  The value can then be picked out
-     from the parse result.
+ (2) const struct fs_parameter_specification *specs;
 
- (3) const struct fs_parameter_specification *specs;
+     Table of parameter specifications, terminated with a null entry, where the
+     entries are of type:
 
-     Table of parameter specifications, where the entries are of type:
-
-       struct fs_parameter_type {
-               enum fs_parameter_spec  type:8;
-               u8                      flags;
+       struct fs_parameter_spec {
+               const char              *name;
+               u8                      opt;
+               enum fs_parameter_type  type:8;
+               unsigned short          flags;
        };
 
-     and the parameter identifier is the index to the array.  'type' indicates
-     the desired value type and must be one of:
+     The 'name' field is a string to match exactly to the parameter key (no
+     wildcards or patterns, and no case-independence) and 'opt' is the value
+     that will be returned by the fs_parse() function in the case of a
+     successful match.
+
+     The 'type' field indicates the desired value type and must be one of:
 
        TYPE NAME               EXPECTED VALUE          RESULT IN
        ======================= ======================= =====================
@@ -525,85 +574,65 @@ The members are as follows:
        fs_param_is_u32_octal   32-bit octal int        result->uint_32
        fs_param_is_u32_hex     32-bit hex int          result->uint_32
        fs_param_is_s32         32-bit signed int       result->int_32
+       fs_param_is_u64         64-bit unsigned int     result->uint_64
        fs_param_is_enum        Enum value name         result->uint_32
        fs_param_is_string      Arbitrary string        param->string
        fs_param_is_blob        Binary blob             param->blob
        fs_param_is_blockdev    Blockdev path           * Needs lookup
        fs_param_is_path        Path                    * Needs lookup
-       fs_param_is_fd          File descriptor         param->file
-
-     And each parameter can be qualified with 'flags':
-
-       fs_param_v_optional     The value is optional
-       fs_param_neg_with_no    If key name is prefixed with "no", it is false
-       fs_param_neg_with_empty If value is "", it is false
-       fs_param_deprecated     The parameter is deprecated.
-
-     For example:
-
-       static const struct fs_parameter_spec afs_param_specs[nr__afs_params] = {
-               [Opt_autocell]  = { fs_param_is flag },
-               [Opt_bar]       = { fs_param_is_enum },
-               [Opt_dyn]       = { fs_param_is flag },
-               [Opt_foo]       = { fs_param_is_bool, fs_param_neg_with_no },
-               [Opt_source]    = { fs_param_is_string },
-       };
+       fs_param_is_fd          File descriptor         result->int_32
 
      Note that if the value is of fs_param_is_bool type, fs_parse() will try
      to match any string value against "0", "1", "no", "yes", "false", "true".
 
-     [!] NOTE that the table must be sorted according to primary key name so
-        that ->keys[] is also sorted.
-
- (4) const char *const *keys;
-
-     Table of primary key names for the parameters.  There must be one entry
-     per defined parameter.  The table is optional if ->nr_params is 0.  The
-     table is just an array of names e.g.:
+     Each parameter can also be qualified with 'flags':
 
-       static const char *const afs_param_keys[nr__afs_params] = {
-               [Opt_autocell]  = "autocell",
-               [Opt_bar]       = "bar",
-               [Opt_dyn]       = "dyn",
-               [Opt_foo]       = "foo",
-               [Opt_source]    = "source",
-       };
-
-     [!] NOTE that the table must be sorted such that the table can be searched
-        with bsearch() using strcmp().  This means that the Opt_* values must
-        correspond to the entries in this table.
-
- (5) const struct constant_table *alt_keys;
-     u8 nr_alt_keys;
-
-     Table of additional key names and their mappings to parameter ID plus the
-     number of elements in the table.  This is optional.  The table is just an
-     array of { name, integer } pairs, e.g.:
+       fs_param_v_optional     The value is optional
+       fs_param_neg_with_no    result->negated set if key is prefixed with "no"
+       fs_param_neg_with_empty result->negated set if value is ""
+       fs_param_deprecated     The parameter is deprecated.
 
-       static const struct constant_table afs_param_keys[] = {
-               { "baz",        Opt_bar },
-               { "dynamic",    Opt_dyn },
+     These are wrapped with a number of convenience wrappers:
+
+       MACRO                   SPECIFIES
+       ======================= ===============================================
+       fsparam_flag()          fs_param_is_flag
+       fsparam_flag_no()       fs_param_is_flag, fs_param_neg_with_no
+       fsparam_bool()          fs_param_is_bool
+       fsparam_u32()           fs_param_is_u32
+       fsparam_u32oct()        fs_param_is_u32_octal
+       fsparam_u32hex()        fs_param_is_u32_hex
+       fsparam_s32()           fs_param_is_s32
+       fsparam_u64()           fs_param_is_u64
+       fsparam_enum()          fs_param_is_enum
+       fsparam_string()        fs_param_is_string
+       fsparam_blob()          fs_param_is_blob
+       fsparam_bdev()          fs_param_is_blockdev
+       fsparam_path()          fs_param_is_path
+       fsparam_fd()            fs_param_is_fd
+
+     all of which take two arguments, a name string and an option number - for
+     example:
+
+       static const struct fs_parameter_spec afs_param_specs[] = {
+               fsparam_flag    ("autocell",    Opt_autocell),
+               fsparam_flag    ("dyn",         Opt_dyn),
+               fsparam_string  ("source",      Opt_source),
+               fsparam_flag_no ("foo",         Opt_foo),
+               {}
        };
 
-     [!] NOTE that the table must be sorted such that strcmp() can be used with
-        bsearch() to search the entries.
-
-     The parameter ID can also be fs_param_key_removed to indicate that a
-     deprecated parameter has been removed and that an error will be given.
-     This differs from fs_param_deprecated where the parameter may still have
-     an effect.
-
-     Further, the behaviour of the parameter may differ when an alternate name
-     is used (for instance with NFS, "v3", "v4.2", etc. are alternate names).
+     An additional macro, __fsparam(), is provided that takes an extra pair
+     of arguments to specify the type and the flags for anything that doesn't
+     match one of the above macros.
 
  (6) const struct fs_parameter_enum *enums;
-     u8 nr_enums;
 
-     Table of enum value names to integer mappings and the number of elements
-     stored therein.  This is of type:
+     Table of enum value names to integer mappings, terminated with a null
+     entry.  This is of type:
 
        struct fs_parameter_enum {
-               u8              param_id;
+               u8              opt;
                char            name[14];
                u8              value;
        };
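+
+     For example (a hedged sketch in the style of the specs example above;
+     the names are assumptions, not taken from this document):
+
+	static const struct fs_parameter_enum afs_param_enums[] = {
+		{ Opt_bar,	"x",	1 },
+		{ Opt_bar,	"z",	2 },
+		{}
+	};
+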
@@ -621,11 +650,6 @@ The members are as follows:
      try to look the value up in the enum table and the result will be stored
      in the parse result.
 
- (7) bool no_source;
-
-     If this is set, fs_parse() will ignore any "source" parameter and not
-     pass it to the filesystem.
-
 The parser should be pointed to by the parser pointer in the file_system_type
 struct as this will provide validation on registration (if
 CONFIG_VALIDATE_FS_PARSER=y) and will allow the description to be queried from
@@ -650,9 +674,8 @@ process the parameters it is given.
                int             value;
        };
 
-     and it must be sorted such that it can be searched using bsearch() using
-     strcmp().  If a match is found, the corresponding value is returned.  If a
-     match isn't found, the not_found value is returned instead.
+     If a match is found, the corresponding value is returned.  If a match
+     isn't found, the not_found value is returned instead.
 
  (*) bool validate_constant_table(const struct constant_table *tbl,
                                  size_t tbl_size,
@@ -665,36 +688,36 @@ process the parameters it is given.
      should just be set to lie inside the low-to-high range.
 
      If all is good, true is returned.  If the table is invalid, errors are
-     logged to dmesg, the stack is dumped and false is returned.
+     logged to dmesg and false is returned.
+
+ (*) bool fs_validate_description(const struct fs_parameter_description *desc);
+
+     This performs some validation checks on a parameter description.  It
+     returns true if the description is good and false if it is not.  It will
+     log errors to dmesg if validation fails.
 
  (*) int fs_parse(struct fs_context *fc,
-                 const struct fs_param_parser *parser,
+                 const struct fs_parameter_description *desc,
                  struct fs_parameter *param,
-                 struct fs_param_parse_result *result);
+                 struct fs_parse_result *result);
 
      This is the main interpreter of parameters.  It uses the parameter
-     description (parser) to look up the name of the parameter to use and to
-     convert that to a parameter ID (stored in result->key).
+     description to look up a parameter by key name and to convert that to an
+     option number (which it returns).
 
      If successful, and if the parameter type indicates the result is a
      boolean, integer or enum type, the value is converted by this function and
-     the result stored in result->{boolean,int_32,uint_32}.
+     the result stored in result->{boolean,int_32,uint_32,uint_64}.
 
      If a match isn't initially made, but the key is prefixed with "no" and no
      value is present, then an attempt will be made to look up the key with the
      prefix removed.  If this matches a parameter for which the type has flag
-     fs_param_neg_with_no set, then a match will be made and the value will be
-     set to false/0/NULL.
-
-     If the parameter is successfully matched and, optionally, parsed
-     correctly, 1 is returned.  If the parameter isn't matched and
-     parser->ignore_unknown is set, then 0 is returned.  Otherwise -EINVAL is
-     returned.
-
- (*) bool fs_validate_description(const struct fs_parameter_description *desc);
+     fs_param_neg_with_no set, then a match will be made and result->negated
+     will be set to true.
 
-     This is validates the parameter description.  It returns true if the
-     description is good and false if it is not.
+     If the parameter isn't matched, -ENOPARAM will be returned; if the
+     parameter is matched, but the value is erroneous, -EINVAL will be
+     returned; otherwise the parameter's option number will be returned.
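+
+     As a hedged sketch (a hypothetical "foofs"; the names are assumptions,
+     not taken from this document), a filesystem's ->parse_param() would
+     typically call it like this:
+
+	static int foofs_parse_param(struct fs_context *fc,
+				     struct fs_parameter *param)
+	{
+		struct fs_parse_result result;
+		int opt;
+
+		opt = fs_parse(fc, &foofs_fs_parameters, param, &result);
+		if (opt < 0)
+			return opt;	/* -ENOPARAM or -EINVAL */
+
+		switch (opt) {
+		case Opt_autocell:
+			/* result->{boolean,int_32,...} hold converted values */
+			break;
+		}
+		return 0;
+	}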
 
  (*) int fs_lookup_param(struct fs_context *fc,
                         struct fs_parameter *value,
index d1ee484a787d1b476cf13bcf7d7b53ac084fb63e..ee9984f3586897c870bd42b854f5d883b245621e 100644 (file)
@@ -36,6 +36,7 @@ Supported adapters:
   * Intel Cannon Lake (PCH)
   * Intel Cedar Fork (PCH)
   * Intel Ice Lake (PCH)
+  * Intel Comet Lake (PCH)
    Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
index 10f4499e677c0863475c8583edfe83a224bc2bae..ee60e519438aaffefa2b29c9787ecc5335966a5f 100644 (file)
@@ -243,10 +243,10 @@ Optimization
 ^^^^^^^^^^^^
 
 The Kprobe-optimizer doesn't insert the jump instruction immediately;
-rather, it calls synchronize_sched() for safety first, because it's
+rather, it calls synchronize_rcu() for safety first, because it's
 possible for a CPU to be interrupted in the middle of executing the
-optimized region [3]_.  As you know, synchronize_sched() can ensure
-that all interruptions that were active when synchronize_sched()
+optimized region [3]_.  As you know, synchronize_rcu() can ensure
+that all interruptions that were active when synchronize_rcu()
 was called are done, but only if CONFIG_PREEMPT=n.  So, this version
 of kprobe optimization supports only kernels with CONFIG_PREEMPT=n [4]_.
 
index f79934225d8d35bd98d3ebd4dcc996324abd66b9..ca983328976bcf36e040ab7508eb29583ed5ca4e 100644 (file)
@@ -102,9 +102,11 @@ Byte sequences
                 dictionary which is empty, and that it will always be
                 invalid at this place.
 
-      17      : bitstream version. If the first byte is 17, the next byte
-                gives the bitstream version (version 1 only). If the first byte
-                is not 17, the bitstream version is 0.
+      17      : bitstream version. If the first byte is 17, and compressed
+                stream length is at least 5 bytes (length of shortest possible
+                versioned bitstream), the next byte gives the bitstream version
+                (version 1 only).
+                Otherwise, the bitstream version is 0.
 
       18..21  : copy 0..3 literals
                 state = (byte - 17) = 0..3  [ copy <state> literals ]
index f460031d85313821ac91d940b915d26994d1ed00..177ac44fa0fac33363d34f840c663d4699039a79 100644 (file)
@@ -623,7 +623,7 @@ the remote via /dev/input/event devices.
 
     -  .. row 78
 
-       -  ``KEY_SCREEN``
+       -  ``KEY_ASPECT_RATIO``
 
        -  Select screen aspect ratio
 
@@ -631,7 +631,7 @@ the remote via /dev/input/event devices.
 
     -  .. row 79
 
-       -  ``KEY_ZOOM``
+       -  ``KEY_FULL_SCREEN``
 
        -  Put device into zoom/full screen mode
 
diff --git a/Documentation/networking/bpf_flow_dissector.rst b/Documentation/networking/bpf_flow_dissector.rst
new file mode 100644 (file)
index 0000000..b375ae2
--- /dev/null
@@ -0,0 +1,126 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================
+BPF Flow Dissector
+==================
+
+Overview
+========
+
+The flow dissector is a routine that parses metadata out of packets. It's
+used in various places in the networking subsystem (RFS, flow hash, etc).
+
+BPF flow dissector is an attempt to reimplement C-based flow dissector logic
+in BPF to gain all the benefits of the BPF verifier (namely, limits on the
+number of instructions and tail calls).
+
+API
+===
+
+BPF flow dissector programs operate on an ``__sk_buff``. However, only a
+limited set of fields is allowed: ``data``, ``data_end`` and ``flow_keys``.
+``flow_keys`` is ``struct bpf_flow_keys`` and contains flow dissector input
+and output arguments.
+
+The inputs are:
+  * ``nhoff`` - initial offset of the networking header
+  * ``thoff`` - initial offset of the transport header, initialized to nhoff
+  * ``n_proto`` - L3 protocol type, parsed out of L2 header
+
+The flow dissector BPF program should fill out the rest of the ``struct
+bpf_flow_keys`` fields. The input arguments ``nhoff/thoff/n_proto`` should
+also be adjusted accordingly.
+
+The return code of the BPF program is either BPF_OK to indicate successful
+dissection, or BPF_DROP to indicate a parsing error.
+
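+As a hedged sketch (not taken from this document; it only handles plain,
+non-fragmented IPv4 and makes the stated header assumptions), a minimal
+dissector program could look like:
+
+.. code:: c
+
+  /* Hedged sketch: assumes <linux/bpf.h>, <linux/if_ether.h>, <linux/ip.h>,
+   * bpf_helpers.h and bpf_endian.h are included by the program.
+   */
+  SEC("flow_dissector")
+  int dissect(struct __sk_buff *skb)
+  {
+        struct bpf_flow_keys *keys = skb->flow_keys;
+        void *data_end = (void *)(long)skb->data_end;
+        void *data = (void *)(long)skb->data;
+        struct iphdr *iph = data + keys->nhoff;
+
+        if (keys->n_proto != bpf_htons(ETH_P_IP))
+                return BPF_DROP;        /* only plain IPv4 in this sketch */
+        if ((void *)(iph + 1) > data_end)
+                return BPF_DROP;        /* truncated header */
+
+        keys->addr_proto = ETH_P_IP;
+        keys->ipv4_src = iph->saddr;
+        keys->ipv4_dst = iph->daddr;
+        keys->ip_proto = iph->protocol;
+        keys->thoff += iph->ihl << 2;   /* transport header after IPv4 hdr */
+
+        return BPF_OK;
+  }
+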
+__sk_buff->data
+===============
+
+In the VLAN-less case, this is what the initial state of the BPF flow
+dissector looks like::
+
+  +------+------+------------+-----------+
+  | DMAC | SMAC | ETHER_TYPE | L3_HEADER |
+  +------+------+------------+-----------+
+                              ^
+                              |
+                              +-- flow dissector starts here
+
+
+.. code:: c
+
+  skb->data + flow_keys->nhoff point to the first byte of L3_HEADER
+  flow_keys->thoff = nhoff
+  flow_keys->n_proto = ETHER_TYPE
+
+In the VLAN case, the flow dissector can be called in two different states.
+
+Pre-VLAN parsing::
+
+  +------+------+------+-----+-----------+-----------+
+  | DMAC | SMAC | TPID | TCI |ETHER_TYPE | L3_HEADER |
+  +------+------+------+-----+-----------+-----------+
+                        ^
+                        |
+                        +-- flow dissector starts here
+
+.. code:: c
+
+  skb->data + flow_keys->nhoff point to the first byte of TCI
+  flow_keys->thoff = nhoff
+  flow_keys->n_proto = TPID
+
+Please note that the TPID can be 802.1AD and, hence, the BPF program would
+have to parse VLAN information twice for double tagged packets.
+
+
+Post-VLAN parsing::
+
+  +------+------+------+-----+-----------+-----------+
+  | DMAC | SMAC | TPID | TCI |ETHER_TYPE | L3_HEADER |
+  +------+------+------+-----+-----------+-----------+
+                                          ^
+                                          |
+                                          +-- flow dissector starts here
+
+.. code:: c
+
+  skb->data + flow_keys->nhoff point to the first byte of L3_HEADER
+  flow_keys->thoff = nhoff
+  flow_keys->n_proto = ETHER_TYPE
+
+In this case VLAN information has been processed before the flow dissector
+and the BPF flow dissector is not required to handle it.
+
+
+The takeaway here is as follows: the BPF flow dissector program can be called
+with an optional VLAN header (single or double tagged) or with none at all,
+and the same program has to be written carefully enough to gracefully handle
+both cases.
+
+
+Reference Implementation
+========================
+
+See ``tools/testing/selftests/bpf/progs/bpf_flow.c`` for the reference
+implementation and ``tools/testing/selftests/bpf/flow_dissector_load.[hc]``
+for the loader. bpftool can be used to load the BPF flow dissector program
+as well.
+
+The reference implementation is organized as follows:
+  * ``jmp_table`` map that contains sub-programs for each supported L3 protocol
+  * ``_dissect`` routine - entry point; it parses the input ``n_proto`` and
+    does a ``bpf_tail_call`` to the appropriate L3 handler
+
+Since BPF at this point doesn't support loops (or any backward jumps),
+jmp_table is used instead to handle multiple levels of encapsulation (and
+IPv6 options).
+
+
+Current Limitations
+===================
+The BPF flow dissector doesn't support exporting all the metadata that the
+in-kernel C-based implementation can export. A notable example is single VLAN
+(802.1Q) and double VLAN (802.1AD) tags. Please refer to ``struct
+bpf_flow_keys`` for the set of information that can currently be exported
+from the BPF context.
index e12a4900cf72cb00b1ade4c0257a23c93d2d8f21..d192f8b9948b5483c16b83f41a5b25c1e5cda846 100644 (file)
@@ -22,8 +22,6 @@ you'll need the following options as well...
     CONFIG_DECNET_ROUTER (to be able to add/delete routes)
     CONFIG_NETFILTER (will be required for the DECnet routing daemon)
 
-    CONFIG_DECNET_ROUTE_FWMARK is optional
-
 Don't turn on SIOCGIFCONF support for DECnet unless you are really sure
 that you need it, in general you won't and it can cause ifconfig to
 malfunction.
index 5449149be496fa8448fa5b74bafe2c5c796cb06d..984e68f9e0269507132846517a4c4c2b8d726216 100644 (file)
@@ -9,6 +9,7 @@ Contents:
    netdev-FAQ
    af_xdp
    batman-adv
+   bpf_flow_dissector
    can
    can_ucan_protocol
    device_drivers/freescale/dpaa2/index
index acdfb5d2bcaa44a8a0ecdcfcae14202d1ed75bc3..c4ac35234f0551bdca1f774f3570fe100912474c 100644 (file)
@@ -422,6 +422,7 @@ tcp_min_rtt_wlen - INTEGER
        minimum RTT when it is moved to a longer path (e.g., due to traffic
        engineering). A longer window makes the filter more resistant to RTT
        inflations such as transient congestion. The unit is seconds.
+       Possible values: 0 - 86400 (1 day)
        Default: 300
 
 tcp_moderate_rcvbuf - BOOLEAN
@@ -1336,6 +1337,7 @@ tag - INTEGER
        Default value is 0.
 
 xfrm4_gc_thresh - INTEGER
+       (Obsolete since linux-4.14)
        The threshold at which we will start garbage collecting for IPv4
        destination cache entries.  At twice this value the system will
        refuse new allocations.
@@ -1919,6 +1921,7 @@ echo_ignore_all - BOOLEAN
        Default: 0
 
 xfrm6_gc_thresh - INTEGER
+       (Obsolete since linux-4.14)
        The threshold at which we will start garbage collecting for IPv6
        destination cache entries.  At twice this value the system will
        refuse new allocations.
index 18c1415e7bfad8f6e6e9b03febaf47f83a0f9915..ace56204dd03b1de816a89e77ad1b0d05bdbbd03 100644 (file)
@@ -50,7 +50,7 @@ the excellent reporting over at LWN.net or read the original code.
 
   patchset
     [PATCH net-next v4 0/9] socket sendmsg MSG_ZEROCOPY
-    http://lkml.kernel.org/r/20170803202945.70750-1-willemdebruijn.kernel@gmail.com
+    https://lkml.kernel.org/netdev/20170803202945.70750-1-willemdebruijn.kernel@gmail.com
 
 
 Interface
index 0ac5fa77f50173c139376a3f2c271faff2e5e569..642fa963be3cf8f325c29947072e6c6851213d4b 100644 (file)
@@ -131,6 +131,19 @@ it to the maintainer to figure out what is the most recent and current
 version that should be applied. If there is any doubt, the maintainer
 will reply and ask what should be done.
 
+Q: I made changes to only a few patches in a patch series; should I resend only those changed?
+----------------------------------------------------------------------------------------------
+A: No, please resend the entire patch series and make sure you number your
+patches such that it is clear this is the latest and greatest set of patches
+that can be applied.
+
+Q: I submitted multiple versions of a patch series and it looks like a version other than the last one has been accepted, what should I do?
+-------------------------------------------------------------------------------------------------------------------------------------------
+A: There is no revert possible; once it is pushed out, it stays like that.
+Please send incremental versions on top of what has been merged in order to fix
+the patches the way they would have looked if your latest patch series had been
+merged.
+
 Q: How can I tell what patches are queued up for backporting to the various stable releases?
 --------------------------------------------------------------------------------------------
 A: Normally Greg Kroah-Hartman collects stable commits himself, but for
index 54128c50d508ef27e5c6f2026fc5dddd0df47ead..ca2136c76042c4ded1aa1608ea38f405e04772da 100644 (file)
@@ -44,10 +44,10 @@ including the Netfilter hooks and the flowtable fastpath bypass.
      /         \    /          \     |Routing |   /            \
   -->  ingress  ---> prerouting ---> |decision|   | postrouting |--> neigh_xmit
      \_________/    \__________/     ----------   \____________/          ^
-       |      ^          |               |               ^                |
-   flowtable  |          |          ____\/___            |                |
-       |      |          |         /         \           |                |
-    __\/___   |          --------->| forward |------------                |
+       |      ^                          |               ^                |
+   flowtable  |                     ____\/___            |                |
+       |      |                    /         \           |                |
+    __\/___   |                    | forward |------------                |
     |-----|   |                    \_________/                            |
     |-----|   |                 'flow offload' rule                       |
     |-----|   |                   adds entry to                           |
index 2df5894353d6954f5c0dd26d2c149c6e9ee6ee4c..cd7303d7fa25dac9ae38d0e73186f3687b7872a7 100644 (file)
@@ -1009,16 +1009,18 @@ The kernel interface functions are as follows:
 
  (*) Check call still alive.
 
-       u32 rxrpc_kernel_check_life(struct socket *sock,
-                                   struct rxrpc_call *call);
+       bool rxrpc_kernel_check_life(struct socket *sock,
+                                    struct rxrpc_call *call,
+                                    u32 *_life);
        void rxrpc_kernel_probe_life(struct socket *sock,
                                     struct rxrpc_call *call);
 
-     The first function returns a number that is updated when ACKs are received
-     from the peer (notably including PING RESPONSE ACKs which we can elicit by
-     sending PING ACKs to see if the call still exists on the server).  The
-     caller should compare the numbers of two calls to see if the call is still
-     alive after waiting for a suitable interval.
+     The first function passes back in *_life a number that is updated when
+     ACKs are received from the peer (notably including PING RESPONSE ACKs
+     which we can elicit by sending PING ACKs to see if the call still exists
+     on the server).  The caller should compare the numbers of two calls to see
+     if the call is still alive after waiting for a suitable interval.  It also
+     returns true as long as the call hasn't yet reached the completed state.
 
      This allows the caller to work out if the server is still contactable and
      if the call is still alive on the server while waiting for the server to
index 52b026be028f65f02aa8bf4a8816ab56f9e509ff..38a4edc4522b46f6ad3859f411eb46dfa4bc7f94 100644 (file)
@@ -413,7 +413,7 @@ algorithm.
 .. _F-RTO: https://tools.ietf.org/html/rfc5682
 
 TCP Fast Path
-============
+=============
 When the kernel receives a TCP packet, it has two paths to handle the
 packet: one is the fast path, the other is the slow path. The comment in the
 kernel code provides a good explanation of them; I pasted it below::
@@ -681,6 +681,7 @@ The TCP stack receives an out of order duplicate packet, so it sends a
 DSACK to the sender.
 
 * TcpExtTCPDSACKRecv
+
 The TCP stack receives a DSACK, which indicates an acknowledged
 duplicate packet is received.
 
 The TCP stack receives a DSACK, which indicates an out of order
 duplicate packet is received.
 
 invalid SACK and DSACK
-====================
+======================
 When a SACK (or DSACK) block is invalid, a corresponding counter would
 be updated. The validation method is based on the start/end sequence
 number of the SACK block. For more details, please refer to the comment
@@ -704,11 +705,13 @@ explaination:
 .. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32
 
 * TcpExtTCPSACKDiscard
+
 This counter indicates how many SACK blocks are invalid. If the invalid
 SACK block is caused by ACK recording, the TCP stack will only ignore
 it and won't update this counter.
 
 * TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo
+
 When a DSACK block is invalid, one of these two counters would be
 updated. Which counter will be updated depends on the undo_marker flag
 of the TCP socket. If the undo_marker is not set, the TCP stack isn't
@@ -719,7 +722,7 @@ will be updated. If the undo_marker is set, TcpExtTCPDSACKIgnoredOld
 will be updated. As implied in its name, it might be an old packet.
 
 SACK shift
-=========
+==========
 The linux networking stack stores data in sk_buff struct (skb for
 short). If a SACK block crosses multiple skbs, the TCP stack will try
 to re-arrange data in these skbs. E.g. if a SACK block acknowledges seq
@@ -730,12 +733,15 @@ seq 14 to 20. All data in skb2 will be moved to skb1, and skb2 will be
 discarded; this operation is 'merge'.
 
 * TcpExtTCPSackShifted
+
 A skb is shifted
 
 * TcpExtTCPSackMerged
+
 A skb is merged
 
 * TcpExtTCPSackShiftFallback
+
 An skb should be shifted or merged, but the TCP stack doesn't do it for
 some reason.
 
index 6af24cdb25ccb51a947d0bf50f3442b53a963d81..3f13d8599337ea8a010d3a33ae605201691a427e 100644 (file)
@@ -866,14 +866,14 @@ The intent is that compaction has less work to do in the future and to
 increase the success rate of future high-order allocations such as SLUB
 allocations, THP and hugetlbfs pages.
 
-To make it sensible with respect to the watermark_scale_factor parameter,
-the unit is in fractions of 10,000. The default value of 15,000 means
-that up to 150% of the high watermark will be reclaimed in the event of
-a pageblock being mixed due to fragmentation. The level of reclaim is
-determined by the number of fragmentation events that occurred in the
-recent past. If this value is smaller than a pageblock then a pageblocks
-worth of pages will be reclaimed (e.g.  2MB on 64-bit x86). A boost factor
-of 0 will disable the feature.
+To make it sensible with respect to the watermark_scale_factor
+parameter, the unit is in fractions of 10,000. The default value of
+15,000 on !DISCONTIGMEM configurations means that up to 150% of the high
+watermark will be reclaimed in the event of a pageblock being mixed due
+to fragmentation. The level of reclaim is determined by the number of
+fragmentation events that occurred in the recent past. If this value is
+smaller than a pageblock then a pageblock's worth of pages will be reclaimed
+(e.g.  2MB on 64-bit x86). A boost factor of 0 will disable the feature.
 
 =============================================================
 
index 7f01fb1c10842dbd87bf8b1da834272c2ac92a47..db0b9d8619f1aefbcc0c750d9211471228bb11db 100644 (file)
@@ -493,10 +493,8 @@ There are some minimal guarantees that may be expected of a CPU
      This type of operation acts as a one-way permeable barrier.  All memory
      operations after the ACQUIRE operation are guaranteed to be seen by the
      rest of the system's components as happening after the ACQUIRE operation.
-     LOCK operations and the smp_load_acquire() and smp_cond_acquire()
-     operations are also included among ACQUIRE operations.  The
-     smp_cond_acquire() operation satisfies the semantic requirements of
-     ACQUIRE by using a control dependency and smp_rmb().
+     LOCK operations and the smp_load_acquire() and smp_cond_load_acquire()
+     operations are also included among ACQUIRE operations.
 
      Memory operations that occur before an ACQUIRE operation may appear to
      happen after it completes.
@@ -2146,33 +2144,40 @@ set_current_state() may be wrapped by:
 	event_indicated = 1;
 	wake_up_process(event_daemon);
 
-A write memory barrier is implied by the wake_up() family of functions -- if
-they actually wake something up.  This barrier is executed before the task
-state is cleared, and so sits between the STORE to indicate the event and
-the STORE to set the task state to TASK_RUNNING.
+If wake_up() wakes something up, this function executes a general memory
+barrier.  If it wakes nothing up, a memory barrier may or may not be
+executed; you must not rely on one being executed in that case.  This
+barrier is executed before the task state is accessed, to be precise,
+between the STORE to indicate the event and the STORE writing the state to
+TASK_RUNNING:
 
-	CPU 1				CPU 2
+	CPU 1 (Sleeper)			CPU 2 (Waker)
 	=============================== ===============================
 	set_current_state();		STORE event_indicated
 	  smp_store_mb();		wake_up();
-	    STORE current->state	  <write barrier>
-	    <general barrier>		STORE current->state
-	LOAD event_indicated
+	    STORE current->state	  ...
+	    <general barrier>		<general barrier>
+	LOAD event_indicated		  if ((LOAD task->state) & TASK_NORMAL)
+					    STORE task->state
 
-To repeat, this write memory barrier is executed only when this code really
-wakes something up.  To illustrate this, consider the following sequence of
-events, assuming that X and Y are both initialized to 0:
+where "task" is the thread being woken up and it equals CPU 1's "current".
+
+To repeat, a general memory barrier is guaranteed to be executed by
+wake_up() if it really wakes something up, but otherwise there is no such
+guarantee.  To understand this, consider the following sequence of events,
+assuming that X and Y are both initialized to 0:
 
 	CPU 1				CPU 2
 	=============================== ===============================
-	X = 1;				STORE event_indicated
+	X = 1;				Y = 1;
 	smp_mb();			wake_up();
-	Y = 1;				wait_event(wq, Y == 1);
-	wake_up();			  load from Y sees 1, no memory barrier
-					load from X might see 0
+	LOAD Y				LOAD X
+
+If a wakeup really does occur, (at least) one of the two loads will see 1.
+If, on the other hand, no actual wakeup occurs, both loads may see 0.
 
-Unlike the case in the example above, if a wakeup really does occur, CPU 2's
-load of X could be guaranteed to see 1.
+wake_up_process() always executes a general memory barrier.  This barrier,
+too, is executed before the task state is accessed.  In particular, if the
+wake_up() in the example above were replaced by wake_up_process(), one of
+the two loads would be guaranteed to see 1.
 
 Available wake-up-like functions include:
 
@@ -2192,6 +2197,8 @@ A write memory barrier is implied by the wake_up() family of functions -- if
 	wake_up_poll();
 	wake_up_process();
 
+In terms of memory ordering, these functions all provide the same or
+stronger ordering guarantees than wake_up().
 
 [!] The memory barriers implied by the sleeping code and the waking code do
 not order stores made before the wake-up with respect to loads the sleeping
index 7de9eee73fcd9d533aec2c1bc88d413f6216db73..64b38dfcc243964bfdccc905908c1d39b965817d 100644 (file)
@@ -5,25 +5,32 @@ The Definitive KVM (Kernel-based Virtual Machine) API Documentation
 ----------------------
 
 The kvm API is a set of ioctls that are issued to control various aspects
-of a virtual machine.  The ioctls belong to three classes
+of a virtual machine.  The ioctls belong to three classes:
 
  - System ioctls: These query and set global attributes which affect the
    whole kvm subsystem.  In addition a system ioctl is used to create
-   virtual machines
+   virtual machines.
 
  - VM ioctls: These query and set attributes that affect an entire virtual
    machine, for example memory layout.  In addition a VM ioctl is used to
-   create virtual cpus (vcpus).
+   create virtual cpus (vcpus) and devices.
 
-   Only run VM ioctls from the same process (address space) that was used
-   to create the VM.
+   VM ioctls must be issued from the same process (address space) that was
+   used to create the VM.
 
  - vcpu ioctls: These query and set attributes that control the operation
    of a single virtual cpu.
 
-   Only run vcpu ioctls from the same thread that was used to create the
-   vcpu.
+   vcpu ioctls should be issued from the same thread that was used to create
+   the vcpu, except for asynchronous vcpu ioctls that are marked as such in
+   the documentation.  Otherwise, the first ioctl after switching threads
+   could see a performance impact.
 
+ - device ioctls: These query and set attributes that control the operation
+   of a single device.
+
+   device ioctls must be issued from the same process (address space) that
+   was used to create the VM.
 
 2. File descriptors
 -------------------
@@ -32,17 +39,34 @@ The kvm API is centered around file descriptors.  An initial
 open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
 can be used to issue system ioctls.  A KVM_CREATE_VM ioctl on this
 handle will create a VM file descriptor which can be used to issue VM
-ioctls.  A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu
-and return a file descriptor pointing to it.  Finally, ioctls on a vcpu
-fd can be used to control the vcpu, including the important task of
-actually running guest code.
+ioctls.  A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will
+create a virtual cpu or device and return a file descriptor pointing to
+the new resource.  Finally, ioctls on a vcpu or device fd can be used
+to control the vcpu or device.  For vcpus, this includes the important
+task of actually running guest code.
 
 In general file descriptors can be migrated among processes by means
 of fork() and the SCM_RIGHTS facility of unix domain socket.  These
 kinds of tricks are explicitly not supported by kvm.  While they will
 not cause harm to the host, their actual behavior is not guaranteed by
-the API.  The only supported use is one virtual machine per process,
-and one vcpu per thread.
+the API.  See "General description" for details on the ioctl usage
+model that is supported by KVM.
+
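+As a brief, hedged illustration (not part of this document's API listing),
+the resulting chain of file descriptors looks like:
+
+    int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
+    int vmfd = ioctl(kvm, KVM_CREATE_VM, 0);        /* system ioctl */
+    int vcpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);   /* VM ioctl */
+    /* vcpu ioctls, such as KVM_RUN, are then issued on vcpufd
+       from the creating thread. */
+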
+It is important to note that although VM ioctls may only be issued from
+the process that created the VM, a VM's lifecycle is associated with its
+file descriptor, not its creator (process).  In other words, the VM and
+its resources, *including the associated address space*, are not freed
+until the last reference to the VM's file descriptor has been released.
+For example, if fork() is issued after ioctl(KVM_CREATE_VM), the VM will
+not be freed until both the parent (original) process and its child have
+put their references to the VM's file descriptor.
+
+Because a VM's resources are not freed until the last reference to its
+file descriptor is released, creating additional references to a VM via
+fork(), dup(), etc... without careful consideration is strongly
+discouraged and may have unwanted side effects, e.g. memory allocated
+by and on behalf of the VM's process may not be freed/unaccounted when
+the VM is shut down.
 
 
 It is important to note that although VM ioctls may only be issued from
@@ -297,7 +321,7 @@ cpu's hardware control block.
 4.8 KVM_GET_DIRTY_LOG (vm ioctl)
 
 Capability: basic
-Architectures: x86
+Architectures: all
 Type: vm ioctl
 Parameters: struct kvm_dirty_log (in/out)
 Returns: 0 on success, -1 on error
@@ -515,11 +539,15 @@ c) KVM_INTERRUPT_SET_LEVEL
 Note that any value for 'irq' other than the ones stated above is invalid
 and incurs unexpected behavior.
 
+This is an asynchronous vcpu ioctl and can be invoked from any thread.
+
 MIPS:
 
 Queues an external interrupt to be injected into the virtual CPU. A negative
 interrupt number dequeues the interrupt.
 
+This is an asynchronous vcpu ioctl and can be invoked from any thread.
+
 
 4.17 KVM_DEBUG_GUEST
 
@@ -1086,14 +1114,12 @@ struct kvm_userspace_memory_region {
 #define KVM_MEM_LOG_DIRTY_PAGES        (1UL << 0)
 #define KVM_MEM_READONLY       (1UL << 1)
 
-This ioctl allows the user to create or modify a guest physical memory
-slot.  When changing an existing slot, it may be moved in the guest
-physical memory space, or its flags may be modified.  It may not be
-resized.  Slots may not overlap in guest physical address space.
-Bits 0-15 of "slot" specifies the slot id and this value should be
-less than the maximum number of user memory slots supported per VM.
-The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
-if this capability is supported by the architecture.
+This ioctl allows the user to create, modify or delete a guest physical
+memory slot.  Bits 0-15 of "slot" specify the slot id and this value
+should be less than the maximum number of user memory slots supported per
+VM.  The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
+if this capability is supported by the architecture.  Slots may not
+overlap in guest physical address space.
 
 If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot"
 specifies the address space which is being modified.  They must be
@@ -1102,6 +1128,10 @@ KVM_CAP_MULTI_ADDRESS_SPACE capability.  Slots in separate address spaces
 are unrelated; the restriction on overlapping slots only applies within
 each address space.
 
+Deleting a slot is done by passing zero for memory_size.  When changing
+an existing slot, it may be moved in the guest physical memory space,
+or its flags may be modified, but it may not be resized.
+
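+For example, deleting slot 0 could look like this (a hedged sketch, not
+taken from this document):
+
+    struct kvm_userspace_memory_region region = {
+        .slot = 0,
+        .guest_phys_addr = 0,
+        .memory_size = 0,       /* zero size deletes the slot */
+        .userspace_addr = 0,
+    };
+    ioctl(vmfd, KVM_SET_USER_MEMORY_REGION, &region);
+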
 Memory for the region is taken starting at the address denoted by the
 field userspace_addr, which must point at user addressable memory for
 the entire memory slot size.  Any object may back this memory, including
@@ -2493,7 +2523,7 @@ KVM_S390_MCHK (vm, vcpu) - machine check interrupt; cr 14 bits in parm,
                            machine checks needing further payload are not
                            supported by this ioctl)
 
-Note that the vcpu ioctl is asynchronous to vcpu execution.
+This is an asynchronous vcpu ioctl and can be invoked from any thread.
 
 4.78 KVM_PPC_GET_HTAB_FD
 
@@ -3042,8 +3072,7 @@ KVM_S390_INT_EMERGENCY - sigp emergency; parameters in .emerg
 KVM_S390_INT_EXTERNAL_CALL - sigp external call; parameters in .extcall
 KVM_S390_MCHK - machine check interrupt; parameters in .mchk
 
-
-Note that the vcpu ioctl is asynchronous to vcpu execution.
+This is an asynchronous vcpu ioctl and can be invoked from any thread.
 
 4.94 KVM_S390_GET_IRQ_STATE
 
@@ -3781,7 +3810,7 @@ to I/O ports.
 4.117 KVM_CLEAR_DIRTY_LOG (vm ioctl)
 
 Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT
-Architectures: x86
+Architectures: x86, arm, arm64, mips
 Type: vm ioctl
 Parameters: struct kvm_dirty_log (in)
 Returns: 0 on success, -1 on error
@@ -3801,8 +3830,9 @@ The ioctl clears the dirty status of pages in a memory slot, according to
 the bitmap that is passed in struct kvm_clear_dirty_log's dirty_bitmap
 field.  Bit 0 of the bitmap corresponds to page "first_page" in the
 memory slot, and num_pages is the size in bits of the input bitmap.
-Both first_page and num_pages must be a multiple of 64.  For each bit
-that is set in the input bitmap, the corresponding page is marked "clean"
+first_page must be a multiple of 64; num_pages must also be a multiple of
+64 unless first_page + num_pages is the size of the memory slot.  For each
+bit that is set in the input bitmap, the corresponding page is marked "clean"
 in KVM's dirty bitmap, and dirty tracking is re-enabled for that page
 (for example via write-protection, or by clearing the dirty bit in
 a page table entry).
@@ -4770,7 +4800,7 @@ and injected exceptions.
 
 7.18 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT
 
-Architectures: all
+Architectures: x86, arm, arm64, mips
 Parameters: args[0] whether feature should be enabled or not
 
 With this capability enabled, KVM_GET_DIRTY_LOG will not automatically
index f365102c80f5dd64133cbe60a8a4fd76fc86393d..2efe0efc516e1624e89f3e60def8551bf10c05e7 100644 (file)
@@ -142,7 +142,7 @@ Shadow pages contain the following information:
     If clear, this page corresponds to a guest page table denoted by the gfn
     field.
   role.quadrant:
-    When role.cr4_pae=0, the guest uses 32-bit gptes while the host uses 64-bit
+    When role.gpte_is_8_bytes=0, the guest uses 32-bit gptes while the host uses 64-bit
     sptes.  That means a guest page table contains more ptes than the host,
     so multiple shadow pages are needed to shadow one guest page.
     For first-level shadow pages, role.quadrant can be 0 or 1 and denotes the
@@ -158,9 +158,9 @@ Shadow pages contain the following information:
     The page is invalid and should not be used.  It is a root page that is
     currently pinned (by a cpu hardware register pointing to it); once it is
     unpinned it will be destroyed.
-  role.cr4_pae:
-    Contains the value of cr4.pae for which the page is valid (e.g. whether
-    32-bit or 64-bit gptes are in use).
+  role.gpte_is_8_bytes:
+    Reflects the size of the guest PTE for which the page is valid, i.e. '1'
+    if 64-bit gptes are in use, '0' if 32-bit gptes are in use.
   role.nxe:
     Contains the value of efer.nxe for which the page is valid.
   role.cr0_wp:
@@ -173,6 +173,9 @@ Shadow pages contain the following information:
     Contains the value of cr4.smap && !cr0.wp for which the page is valid
     (pages for which this is true are different from other pages; see the
     treatment of cr0.wp=0 below).
+  role.ept_sp:
+    This is a virtual flag to denote a shadowed nested EPT page.  ept_sp
+    is true if "cr0_wp && smap_andnot_wp", an otherwise invalid combination.
   role.smm:
     Is 1 if the page is valid in system management mode.  This field
     determines which of the kvm_memslots array was used to build this
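
A hypothetical stand-in (abridged; not the kernel's actual definition, which
lives in arch/x86/include/asm/kvm_host.h) showing how the virtual ept_sp
flag described above falls out of the role encoding:

  #include <stdbool.h>

  /* Hypothetical, abridged version of the role bits discussed above. */
  union mmu_page_role {
          unsigned int word;
          struct {
                  unsigned int cr0_wp:1;
                  unsigned int smap_andnot_wp:1;
          };
  };

  /* ept_sp has no dedicated storage: it is encoded as the otherwise
   * invalid combination of cr0_wp and smap_andnot_wp both being set. */
  static inline bool role_is_ept_sp(union mmu_page_role role)
  {
          return role.cr0_wp && role.smap_andnot_wp;
  }
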
index e17ebf70b5480ecc232ce1f62aedf95a03b5f403..7be412e1a38003b07d6eb03143cfaf99951e9ad5 100644 (file)
@@ -1893,14 +1893,15 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git
 ARM/NUVOTON NPCM ARCHITECTURE
 M:     Avi Fishman <avifishman70@gmail.com>
 M:     Tomer Maimon <tmaimon77@gmail.com>
+M:     Tali Perry <tali.perry1@gmail.com>
 R:     Patrick Venture <venture@google.com>
 R:     Nancy Yuen <yuenn@google.com>
-R:     Brendan Higgins <brendanhiggins@google.com>
+R:     Benjamin Fair <benjaminfair@google.com>
 L:     openbmc@lists.ozlabs.org (moderated for non-subscribers)
 S:     Supported
 F:     arch/arm/mach-npcm/
 F:     arch/arm/boot/dts/nuvoton-npcm*
-F:     include/dt-bindings/clock/nuvoton,npcm7xx-clks.h
+F:     include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
 F:     drivers/*/*npcm*
 F:     Documentation/devicetree/bindings/*/*npcm*
 F:     Documentation/devicetree/bindings/*/*/*npcm*
@@ -2356,7 +2357,7 @@ F:        arch/arm/mm/cache-uniphier.c
 F:     arch/arm64/boot/dts/socionext/uniphier*
 F:     drivers/bus/uniphier-system-bus.c
 F:     drivers/clk/uniphier/
-F:     drivers/dmaengine/uniphier-mdmac.c
+F:     drivers/dma/uniphier-mdmac.c
 F:     drivers/gpio/gpio-uniphier.c
 F:     drivers/i2c/busses/i2c-uniphier*
 F:     drivers/irqchip/irq-uniphier-aidet.c
@@ -3120,6 +3121,7 @@ F:        drivers/cpufreq/bmips-cpufreq.c
 BROADCOM BMIPS MIPS ARCHITECTURE
 M:     Kevin Cernekee <cernekee@gmail.com>
 M:     Florian Fainelli <f.fainelli@gmail.com>
+L:     bcm-kernel-feedback-list@broadcom.com
 L:     linux-mips@vger.kernel.org
 T:     git git://github.com/broadcom/stblinux.git
 S:     Maintained
@@ -4129,7 +4131,7 @@ F:        drivers/cpuidle/*
 F:     include/linux/cpuidle.h
 
 CRAMFS FILESYSTEM
-M:     Nicolas Pitre <nico@linaro.org>
+M:     Nicolas Pitre <nico@fluxnic.net>
 S:     Maintained
 F:     Documentation/filesystems/cramfs.txt
 F:     fs/cramfs/
@@ -5833,7 +5835,7 @@ L:        netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/ABI/testing/sysfs-bus-mdio
 F:     Documentation/devicetree/bindings/net/mdio*
-F:     Documentation/networking/phy.txt
+F:     Documentation/networking/phy.rst
 F:     drivers/net/phy/
 F:     drivers/of/of_mdio.c
 F:     drivers/of/of_net.c
@@ -6408,7 +6410,6 @@ L:        linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
 S:     Maintained
 F:     kernel/futex.c
-F:     kernel/futex_compat.c
 F:     include/asm-generic/futex.h
 F:     include/linux/futex.h
 F:     include/uapi/linux/futex.h
@@ -6461,7 +6462,7 @@ S:        Maintained
 F:     drivers/media/radio/radio-gemtek*
 
 GENERIC GPIO I2C DRIVER
-M:     Haavard Skinnemoen <hskinnemoen@gmail.com>
+M:     Wolfram Sang <wsa+renesas@sang-engineering.com>
 S:     Supported
 F:     drivers/i2c/busses/i2c-gpio.c
 F:     include/linux/platform_data/i2c-gpio.h
@@ -7333,7 +7334,6 @@ F:        Documentation/devicetree/bindings/i3c/
 F:     Documentation/driver-api/i3c
 F:     drivers/i3c/
 F:     include/linux/i3c/
-F:     include/dt-bindings/i3c/
 
 I3C DRIVER FOR SYNOPSYS DESIGNWARE
 M:     Vitor Soares <vitor.soares@synopsys.com>
@@ -7516,7 +7516,7 @@ F:        include/net/mac802154.h
 F:     include/net/af_ieee802154.h
 F:     include/net/cfg802154.h
 F:     include/net/ieee802154_netdev.h
-F:     Documentation/networking/ieee802154.txt
+F:     Documentation/networking/ieee802154.rst
 
 IFE PROTOCOL
 M:     Yotam Gigi <yotam.gi@gmail.com>
@@ -8096,6 +8096,16 @@ F:       include/linux/iommu.h
 F:     include/linux/of_iommu.h
 F:     include/linux/iova.h
 
+IO_URING
+M:     Jens Axboe <axboe@kernel.dk>
+L:     linux-block@vger.kernel.org
+L:     linux-fsdevel@vger.kernel.org
+T:     git git://git.kernel.dk/linux-block
+T:     git git://git.kernel.dk/liburing
+S:     Maintained
+F:     fs/io_uring.c
+F:     include/uapi/linux/io_uring.h
+
 IP MASQUERADING
 M:     Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
 S:     Maintained
@@ -8698,6 +8708,7 @@ F:        scripts/leaking_addresses.pl
 LED SUBSYSTEM
 M:     Jacek Anaszewski <jacek.anaszewski@gmail.com>
 M:     Pavel Machek <pavel@ucw.cz>
+R:     Dan Murphy <dmurphy@ti.com>
 L:     linux-leds@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
 S:     Maintained
@@ -8983,7 +8994,7 @@ R:        Daniel Lustig <dlustig@nvidia.com>
 L:     linux-kernel@vger.kernel.org
 L:     linux-arch@vger.kernel.org
 S:     Supported
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
 F:     tools/memory-model/
 F:     Documentation/atomic_bitops.txt
 F:     Documentation/atomic_t.txt
@@ -9089,7 +9100,6 @@ F:        arch/*/include/asm/spinlock*.h
 F:     include/linux/rwlock*.h
 F:     include/linux/mutex*.h
 F:     include/linux/rwsem*.h
-F:     arch/*/include/asm/rwsem.h
 F:     include/linux/seqlock.h
 F:     lib/locking*.[ch]
 F:     kernel/locking/
@@ -10135,7 +10145,7 @@ F:      drivers/spi/spi-at91-usart.c
 F:     Documentation/devicetree/bindings/mfd/atmel-usart.txt
 
 MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
-M:     Woojung Huh <Woojung.Huh@microchip.com>
+M:     Woojung Huh <woojung.huh@microchip.com>
 M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -12165,6 +12175,7 @@ F:      arch/*/kernel/*/*/perf_event*.c
 F:     arch/*/include/asm/perf_event.h
 F:     arch/*/kernel/perf_callchain.c
 F:     arch/*/events/*
+F:     arch/*/events/*/*
 F:     tools/perf/
 
 PERSONALITY HANDLING
@@ -13031,9 +13042,9 @@ M:      Josh Triplett <josh@joshtriplett.org>
 R:     Steven Rostedt <rostedt@goodmis.org>
 R:     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 R:     Lai Jiangshan <jiangshanlai@gmail.com>
-L:     linux-kernel@vger.kernel.org
+L:     rcu@vger.kernel.org
 S:     Supported
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
 F:     tools/testing/selftests/rcutorture
 
 RDC R-321X SoC
@@ -13079,10 +13090,10 @@ R:    Steven Rostedt <rostedt@goodmis.org>
 R:     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 R:     Lai Jiangshan <jiangshanlai@gmail.com>
 R:     Joel Fernandes <joel@joelfernandes.org>
-L:     linux-kernel@vger.kernel.org
+L:     rcu@vger.kernel.org
 W:     http://www.rdrop.com/users/paulmck/RCU/
 S:     Supported
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
 F:     Documentation/RCU/
 X:     Documentation/RCU/torture.txt
 F:     include/linux/rcu*
@@ -13972,7 +13983,7 @@ F:      drivers/media/rc/serial_ir.c
 SFC NETWORK DRIVER
 M:     Solarflare linux maintainers <linux-net-drivers@solarflare.com>
 M:     Edward Cree <ecree@solarflare.com>
-M:     Bert Kenward <bkenward@solarflare.com>
+M:     Martin Habets <mhabets@solarflare.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/sfc/
@@ -14234,10 +14245,10 @@ M:    "Paul E. McKenney" <paulmck@linux.ibm.com>
 M:     Josh Triplett <josh@joshtriplett.org>
 R:     Steven Rostedt <rostedt@goodmis.org>
 R:     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-L:     linux-kernel@vger.kernel.org
+L:     rcu@vger.kernel.org
 W:     http://www.rdrop.com/users/paulmck/RCU/
 S:     Supported
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
 F:     include/linux/srcu*.h
 F:     kernel/rcu/srcu*.c
 
@@ -15684,7 +15695,7 @@ M:      "Paul E. McKenney" <paulmck@linux.ibm.com>
 M:     Josh Triplett <josh@joshtriplett.org>
 L:     linux-kernel@vger.kernel.org
 S:     Supported
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
 F:     Documentation/RCU/torture.txt
 F:     kernel/torture.c
 F:     kernel/rcu/rcutorture.c
@@ -16499,7 +16510,7 @@ F:      drivers/char/virtio_console.c
 F:     include/linux/virtio_console.h
 F:     include/uapi/linux/virtio_console.h
 
-VIRTIO CORE, NET AND BLOCK DRIVERS
+VIRTIO CORE AND NET DRIVERS
 M:     "Michael S. Tsirkin" <mst@redhat.com>
 M:     Jason Wang <jasowang@redhat.com>
 L:     virtualization@lists.linux-foundation.org
@@ -16514,6 +16525,19 @@ F:     include/uapi/linux/virtio_*.h
 F:     drivers/crypto/virtio/
 F:     mm/balloon_compaction.c
 
+VIRTIO BLOCK AND SCSI DRIVERS
+M:     "Michael S. Tsirkin" <mst@redhat.com>
+M:     Jason Wang <jasowang@redhat.com>
+R:     Paolo Bonzini <pbonzini@redhat.com>
+R:     Stefan Hajnoczi <stefanha@redhat.com>
+L:     virtualization@lists.linux-foundation.org
+S:     Maintained
+F:     drivers/block/virtio_blk.c
+F:     drivers/scsi/virtio_scsi.c
+F:     include/uapi/linux/virtio_blk.h
+F:     include/uapi/linux/virtio_scsi.h
+F:     drivers/vhost/scsi.c
+
 VIRTIO CRYPTO DRIVER
 M:     Gonglei <arei.gonglei@huawei.com>
 L:     virtualization@lists.linux-foundation.org
index 99c0530489ef000781e25e8e677e5332d328628f..26c92f892d24b1481b3b3ee29e1a53224c4e704d 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION =
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
@@ -31,26 +31,12 @@ _all:
 # descending is started. They are now explicitly listed as the
 # prepare rule.
 
-# Ugly workaround for Debian make-kpkg:
-# make-kpkg directly includes the top Makefile of Linux kernel. In such a case,
-# skip sub-make to support debian_* targets in ruleset/kernel_version.mk, but
-# displays warning to discourage such abusage.
-ifneq ($(word 2, $(MAKEFILE_LIST)),)
-$(warning Do not include top Makefile of Linux Kernel)
-sub-make-done := 1
-MAKEFLAGS += -rR
-endif
-
-ifneq ($(sub-make-done),1)
+ifneq ($(sub_make_done),1)
 
 # Do not use make's built-in rules and variables
 # (this increases performance and avoids hard-to-debug behaviour)
 MAKEFLAGS += -rR
 
-# 'MAKEFLAGS += -rR' does not become immediately effective for old
-# GNU Make versions. Cancel implicit rules for this Makefile.
-$(lastword $(MAKEFILE_LIST)): ;
-
 # Avoid funny character set dependencies
 unexport LC_ALL
 LC_COLLATE=C
@@ -153,6 +139,7 @@ $(if $(KBUILD_OUTPUT),, \
 # 'sub-make' below.
 MAKEFLAGS += --include-dir=$(CURDIR)
 
+need-sub-make := 1
 else
 
 # Do not print "Entering directory ..." at all for in-tree build.
@@ -160,6 +147,18 @@ MAKEFLAGS += --no-print-directory
 
 endif # ifneq ($(KBUILD_OUTPUT),)
 
+ifneq ($(filter 3.%,$(MAKE_VERSION)),)
+# 'MAKEFLAGS += -rR' does not immediately become effective for GNU Make 3.x
+# We need to invoke sub-make to avoid implicit rules in the top Makefile.
+need-sub-make := 1
+# Cancel implicit rules for this Makefile.
+$(lastword $(MAKEFILE_LIST)): ;
+endif
+
+export sub_make_done := 1
+
+ifeq ($(need-sub-make),1)
+
 PHONY += $(MAKECMDGOALS) sub-make
 
 $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
@@ -167,12 +166,15 @@ $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
 
 # Invoke a second make in the output directory, passing relevant variables
 sub-make:
-       $(Q)$(MAKE) sub-make-done=1 \
+       $(Q)$(MAKE) \
        $(if $(KBUILD_OUTPUT),-C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR)) \
        -f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS))
 
-else # sub-make-done
+endif # need-sub-make
+endif # sub_make_done
+
 # We process the rest of the Makefile if this is the final invocation of make
+ifeq ($(need-sub-make),)
 
 # Do not print "Entering directory ...",
 # but we want to display it when entering to the output directory
@@ -497,7 +499,8 @@ outputmakefile:
 ifneq ($(KBUILD_SRC),)
        $(Q)ln -fsn $(srctree) source
        $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree)
-       $(Q){ echo "# this is build directory, ignore it"; echo "*"; } > .gitignore
+       $(Q)test -e .gitignore || \
+       { echo "# this is build directory, ignore it"; echo "*"; } > .gitignore
 endif
 
 ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
@@ -675,9 +678,10 @@ KBUILD_CFLAGS      += $(call cc-disable-warning,frame-address,)
 KBUILD_CFLAGS  += $(call cc-disable-warning, format-truncation)
 KBUILD_CFLAGS  += $(call cc-disable-warning, format-overflow)
 KBUILD_CFLAGS  += $(call cc-disable-warning, int-in-bool-context)
+KBUILD_CFLAGS  += $(call cc-disable-warning, address-of-packed-member)
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS  += $(call cc-option,-Oz,-Os)
+KBUILD_CFLAGS  += -Os
 else
 KBUILD_CFLAGS   += -O2
 endif
@@ -716,7 +720,6 @@ ifdef CONFIG_CC_IS_CLANG
 KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
 KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
 KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
-KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
 # Quiet clang warning: comparison of unsigned expression < 0 is always false
 KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
 # CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
@@ -950,9 +953,11 @@ mod_sign_cmd = true
 endif
 export mod_sign_cmd
 
+HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
+
 ifdef CONFIG_STACK_VALIDATION
   has_libelf := $(call try-run,\
-               echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0)
+               echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
@@ -1757,7 +1762,7 @@ existing-targets := $(wildcard $(sort $(targets)))
 
 endif   # ifeq ($(config-targets),1)
 endif   # ifeq ($(mixed-targets),1)
-endif   # sub-make-done
+endif   # need-sub-make
 
 PHONY += FORCE
 FORCE:
index 33687dddd86a7e04dfa7e7829788b4a0442ae61a..3ab446bd12ef4b4cd463bd245db37094dd0c0504 100644 (file)
@@ -383,7 +383,13 @@ config HAVE_ARCH_JUMP_LABEL_RELATIVE
 config HAVE_RCU_TABLE_FREE
        bool
 
-config HAVE_RCU_TABLE_INVALIDATE
+config HAVE_RCU_TABLE_NO_INVALIDATE
+       bool
+
+config HAVE_MMU_GATHER_PAGE_SIZE
+       bool
+
+config HAVE_MMU_GATHER_NO_GATHER
        bool
 
 config ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -901,6 +907,15 @@ config HAVE_ARCH_PREL32_RELOCATIONS
 config ARCH_USE_MEMREMAP_PROT
        bool
 
+config LOCK_EVENT_COUNTS
+       bool "Locking event counts collection"
+       depends on DEBUG_FS
+       ---help---
+         Enable lightweight counting of various locking-related events
+         in the system with minimal performance impact. Keeping the
+         overhead low reduces the chance that the instrumentation itself
+         changes application behavior through timing differences. The
+         counts are reported via debugfs.
+
 source "kernel/gcov/Kconfig"
 
 source "scripts/gcc-plugins/Kconfig"
index 584a6e1148539682a34a4c480c3f5252ee169267..f7b19b813a70199bdf8cf48029c5d3f1932fc2aa 100644 (file)
@@ -36,6 +36,7 @@ config ALPHA
        select ODD_RT_SIGACTION
        select OLD_SIGSUSPEND
        select CPU_NO_EFFICIENT_FFS if !ALPHA_EV67
+       select MMU_GATHER_NO_RANGE
        help
          The Alpha is a 64-bit general-purpose processor designed and
          marketed by the Digital Equipment Corporation of blessed memory,
@@ -49,13 +50,6 @@ config MMU
        bool
        default y
 
-config RWSEM_GENERIC_SPINLOCK
-       bool
-
-config RWSEM_XCHGADD_ALGORITHM
-       bool
-       default y
-
 config ARCH_HAS_ILOG2_U32
        bool
        default n
index dc0ab28baca14b5a2eb0aa0064255d023421612c..70b783333965e875a7cb4f0a110327cea46cd663 100644 (file)
@@ -6,6 +6,7 @@ generic-y += exec.h
 generic-y += export.h
 generic-y += fb.h
 generic-y += irq_work.h
+generic-y += kvm_para.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += preempt.h
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
deleted file mode 100644 (file)
index cf8fc8f..0000000
+++ /dev/null
@@ -1,211 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ALPHA_RWSEM_H
-#define _ALPHA_RWSEM_H
-
-/*
- * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
- * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
- */
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-
-#include <linux/compiler.h>
-
-#define RWSEM_UNLOCKED_VALUE           0x0000000000000000L
-#define RWSEM_ACTIVE_BIAS              0x0000000000000001L
-#define RWSEM_ACTIVE_MASK              0x00000000ffffffffL
-#define RWSEM_WAITING_BIAS             (-0x0000000100000000L)
-#define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-static inline int ___down_read(struct rw_semaphore *sem)
-{
-       long oldcount;
-#ifndef        CONFIG_SMP
-       oldcount = sem->count.counter;
-       sem->count.counter += RWSEM_ACTIVE_READ_BIAS;
-#else
-       long temp;
-       __asm__ __volatile__(
-       "1:     ldq_l   %0,%1\n"
-       "       addq    %0,%3,%2\n"
-       "       stq_c   %2,%1\n"
-       "       beq     %2,2f\n"
-       "       mb\n"
-       ".subsection 2\n"
-       "2:     br      1b\n"
-       ".previous"
-       :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
-       :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
-#endif
-       return (oldcount < 0);
-}
-
-static inline void __down_read(struct rw_semaphore *sem)
-{
-       if (unlikely(___down_read(sem)))
-               rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_killable(struct rw_semaphore *sem)
-{
-       if (unlikely(___down_read(sem)))
-               if (IS_ERR(rwsem_down_read_failed_killable(sem)))
-                       return -EINTR;
-
-       return 0;
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
-       long old, new, res;
-
-       res = atomic_long_read(&sem->count);
-       do {
-               new = res + RWSEM_ACTIVE_READ_BIAS;
-               if (new <= 0)
-                       break;
-               old = res;
-               res = atomic_long_cmpxchg(&sem->count, old, new);
-       } while (res != old);
-       return res >= 0 ? 1 : 0;
-}
-
-static inline long ___down_write(struct rw_semaphore *sem)
-{
-       long oldcount;
-#ifndef        CONFIG_SMP
-       oldcount = sem->count.counter;
-       sem->count.counter += RWSEM_ACTIVE_WRITE_BIAS;
-#else
-       long temp;
-       __asm__ __volatile__(
-       "1:     ldq_l   %0,%1\n"
-       "       addq    %0,%3,%2\n"
-       "       stq_c   %2,%1\n"
-       "       beq     %2,2f\n"
-       "       mb\n"
-       ".subsection 2\n"
-       "2:     br      1b\n"
-       ".previous"
-       :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
-       :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
-#endif
-       return oldcount;
-}
-
-static inline void __down_write(struct rw_semaphore *sem)
-{
-       if (unlikely(___down_write(sem)))
-               rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
-{
-       if (unlikely(___down_write(sem))) {
-               if (IS_ERR(rwsem_down_write_failed_killable(sem)))
-                       return -EINTR;
-       }
-
-       return 0;
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
-       long ret = atomic_long_cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
-                          RWSEM_ACTIVE_WRITE_BIAS);
-       if (ret == RWSEM_UNLOCKED_VALUE)
-               return 1;
-       return 0;
-}
-
-static inline void __up_read(struct rw_semaphore *sem)
-{
-       long oldcount;
-#ifndef        CONFIG_SMP
-       oldcount = sem->count.counter;
-       sem->count.counter -= RWSEM_ACTIVE_READ_BIAS;
-#else
-       long temp;
-       __asm__ __volatile__(
-       "       mb\n"
-       "1:     ldq_l   %0,%1\n"
-       "       subq    %0,%3,%2\n"
-       "       stq_c   %2,%1\n"
-       "       beq     %2,2f\n"
-       ".subsection 2\n"
-       "2:     br      1b\n"
-       ".previous"
-       :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
-       :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
-#endif
-       if (unlikely(oldcount < 0))
-               if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
-                       rwsem_wake(sem);
-}
-
-static inline void __up_write(struct rw_semaphore *sem)
-{
-       long count;
-#ifndef        CONFIG_SMP
-       sem->count.counter -= RWSEM_ACTIVE_WRITE_BIAS;
-       count = sem->count.counter;
-#else
-       long temp;
-       __asm__ __volatile__(
-       "       mb\n"
-       "1:     ldq_l   %0,%1\n"
-       "       subq    %0,%3,%2\n"
-       "       stq_c   %2,%1\n"
-       "       beq     %2,2f\n"
-       "       subq    %0,%3,%0\n"
-       ".subsection 2\n"
-       "2:     br      1b\n"
-       ".previous"
-       :"=&r" (count), "=m" (sem->count), "=&r" (temp)
-       :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
-#endif
-       if (unlikely(count))
-               if ((int)count == 0)
-                       rwsem_wake(sem);
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-       long oldcount;
-#ifndef        CONFIG_SMP
-       oldcount = sem->count.counter;
-       sem->count.counter -= RWSEM_WAITING_BIAS;
-#else
-       long temp;
-       __asm__ __volatile__(
-       "1:     ldq_l   %0,%1\n"
-       "       addq    %0,%3,%2\n"
-       "       stq_c   %2,%1\n"
-       "       beq     %2,2f\n"
-       "       mb\n"
-       ".subsection 2\n"
-       "2:     br      1b\n"
-       ".previous"
-       :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
-       :"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory");
-#endif
-       if (unlikely(oldcount < 0))
-               rwsem_downgrade_wake(sem);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ALPHA_RWSEM_H */
index 8f5042b61875fdd4c308f2b3e3accab95ff571e8..4f79e331af5ea4237ba8200867bf2161f1676d7c 100644 (file)
@@ -2,12 +2,6 @@
 #ifndef _ALPHA_TLB_H
 #define _ALPHA_TLB_H
 
-#define tlb_start_vma(tlb, vma)                        do { } while (0)
-#define tlb_end_vma(tlb, vma)                  do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0)
-
-#define tlb_flush(tlb)                         flush_tlb_mm((tlb)->mm)
-
 #include <asm-generic/tlb.h>
 
 #define __pte_free_tlb(tlb, pte, address)              pte_free((tlb)->mm, pte)
diff --git a/arch/alpha/include/uapi/asm/kvm_para.h b/arch/alpha/include/uapi/asm/kvm_para.h
deleted file mode 100644 (file)
index baacc49..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/kvm_para.h>
index 63ed39cbd3bd13a40e98ec3a9aff3b3266b3e235..165f268beafc471e14eac4c8d6d08e52c5c89864 100644 (file)
 532    common  getppid                         sys_getppid
# all other architectures have common numbers for new syscalls; alpha
 # is the exception.
+534    common  pidfd_send_signal               sys_pidfd_send_signal
+535    common  io_uring_setup                  sys_io_uring_setup
+536    common  io_uring_enter                  sys_io_uring_enter
+537    common  io_uring_register               sys_io_uring_register
index df55672c59e6e4b99f5846f12944325c227cab03..23e063df5d2cf1233665b575193e9c5e7e227e52 100644 (file)
@@ -63,9 +63,6 @@ config SCHED_OMIT_FRAME_POINTER
 config GENERIC_CSUM
        def_bool y
 
-config RWSEM_GENERIC_SPINLOCK
-       def_bool y
-
 config ARCH_DISCONTIGMEM_ENABLE
        def_bool n
 
@@ -144,11 +141,11 @@ config ARC_CPU_770
          Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
          This core has a bunch of cool new features:
          -MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4)
-                   Shared Address Spaces (for sharing TLB entries in MMU)
+                  Shared Address Spaces (for sharing TLB entries in MMU)
          -Caches: New Prog Model, Region Flush
          -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr
 
-endif  #ISA_ARCOMPACT
+endif #ISA_ARCOMPACT
 
 config ARC_CPU_HS
        bool "ARC-HS"
@@ -198,7 +195,7 @@ config ARC_SMP_HALT_ON_RESET
          at the designated entry point. Otherwise, all cores jump to the
          common entry point and spin-wait for the master's signal.
 
-endif  #SMP
+endif #SMP
 
 config ARC_MCIP
        bool "ARConnect Multicore IP (MCIP) Support "
@@ -249,7 +246,7 @@ config ARC_CACHE_VIPT_ALIASING
        bool "Support VIPT Aliasing D$"
        depends on ARC_HAS_DCACHE && ISA_ARCOMPACT
 
-endif  #ARC_CACHE
+endif #ARC_CACHE
 
 config ARC_HAS_ICCM
        bool "Use ICCM"
@@ -370,7 +367,7 @@ config ARC_FPU_SAVE_RESTORE
          based on actual usage of FPU by a task. Thus our implementation
          does this for all tasks in the system.
 
-endif  #ISA_ARCOMPACT
+endif #ISA_ARCOMPACT
 
 config ARC_CANT_LLSC
        def_bool n
@@ -386,6 +383,15 @@ config ARC_HAS_SWAPE
 
 if ISA_ARCV2
 
+config ARC_USE_UNALIGNED_MEM_ACCESS
+       bool "Enable unaligned access in HW"
+       default y
+       select HAVE_EFFICIENT_UNALIGNED_ACCESS
+       help
+         The ARC HS architecture supports unaligned memory accesses,
+         but the feature is disabled by default. Say Y to enable
+         unaligned accesses in hardware and let software make use of them.
+
 config ARC_HAS_LL64
        bool "Insn: 64bit LDD/STD"
        help
@@ -414,7 +420,7 @@ config ARC_IRQ_NO_AUTOSAVE
          This is programmable and can optionally be disabled, in which
          case software INTERRUPT_PROLOGUE/EPILOGUE does the needed work
 
-endif  # ISA_ARCV2
+endif # ISA_ARCV2
 
 endmenu   # "ARC CPU Configuration"
 
index df00578c279d4bc0ee03d71089769383440e7cf6..e2b991f75bc5b7bc0d8103d65e938df9b9c038ac 100644 (file)
@@ -28,6 +28,12 @@ cflags-$(CONFIG_ARC_HAS_SWAPE)               += -mswape
 
 ifdef CONFIG_ISA_ARCV2
 
+ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+cflags-y                               += -munaligned-access
+else
+cflags-y                               += -mno-unaligned-access
+endif
+
 ifndef CONFIG_ARC_HAS_LL64
 cflags-y                               += -mno-ll64
 endif
index 02410b2114334466572c05e651b6227b02e415f0..c0bcd97522bbfcfa96b0f9e0fa992d104df474f7 100644 (file)
@@ -38,7 +38,7 @@
                        clock-div = <6>;
                };
 
-               iomux: iomux@FF10601c {
+               iomux: iomux@ff10601c {
                        /* Port 1 */
                        pctl_tsin_s0: pctl-tsin-s0 {   /* Serial TS-in 0 */
                                abilis,function = "mis0";
                        };
                };
 
-               gpioa: gpio@FF140000 {
+               gpioa: gpio@ff140000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF140000 0x1000>;
+                       reg = <0xff140000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <3>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioa";
                };
-               gpiob: gpio@FF141000 {
+               gpiob: gpio@ff141000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF141000 0x1000>;
+                       reg = <0xff141000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <2>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiob";
                };
-               gpioc: gpio@FF142000 {
+               gpioc: gpio@ff142000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF142000 0x1000>;
+                       reg = <0xff142000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <3>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioc";
                };
-               gpiod: gpio@FF143000 {
+               gpiod: gpio@ff143000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF143000 0x1000>;
+                       reg = <0xff143000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <2>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiod";
                };
-               gpioe: gpio@FF144000 {
+               gpioe: gpio@ff144000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF144000 0x1000>;
+                       reg = <0xff144000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <3>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioe";
                };
-               gpiof: gpio@FF145000 {
+               gpiof: gpio@ff145000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF145000 0x1000>;
+                       reg = <0xff145000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <2>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiof";
                };
-               gpiog: gpio@FF146000 {
+               gpiog: gpio@ff146000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF146000 0x1000>;
+                       reg = <0xff146000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <3>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiog";
                };
-               gpioh: gpio@FF147000 {
+               gpioh: gpio@ff147000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF147000 0x1000>;
+                       reg = <0xff147000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <2>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioh";
                };
-               gpioi: gpio@FF148000 {
+               gpioi: gpio@ff148000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF148000 0x1000>;
+                       reg = <0xff148000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <12>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioi";
                };
-               gpioj: gpio@FF149000 {
+               gpioj: gpio@ff149000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF149000 0x1000>;
+                       reg = <0xff149000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <32>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioj";
                };
-               gpiok: gpio@FF14a000 {
+               gpiok: gpio@ff14a000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF14A000 0x1000>;
+                       reg = <0xff14a000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <22>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiok";
                };
-               gpiol: gpio@FF14b000 {
+               gpiol: gpio@ff14b000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF14B000 0x1000>;
+                       reg = <0xff14b000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <4>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiol";
                };
-               gpiom: gpio@FF14c000 {
+               gpiom: gpio@ff14c000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF14C000 0x1000>;
+                       reg = <0xff14c000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <4>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiom";
                };
-               gpion: gpio@FF14d000 {
+               gpion: gpio@ff14d000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF14D000 0x1000>;
+                       reg = <0xff14d000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <5>;
index 3acf04db80302875d86d4e4de355c077bfabbd02..c968e677db46b01e88f344c52a6232f33fda9556 100644 (file)
        };
 
        soc100 {
-               uart@FF100000 {
+               uart@ff100000 {
                        pinctrl-names = "default";
                        pinctrl-0 = <&pctl_uart0>;
                };
-               ethernet@FE100000 {
+               ethernet@fe100000 {
                        phy-mode = "rgmii";
                };
 
-               i2c0: i2c@FF120000 {
+               i2c0: i2c@ff120000 {
                        i2c-sda-hold-time-ns = <432>;
                };
-               i2c1: i2c@FF121000 {
+               i2c1: i2c@ff121000 {
                        i2c-sda-hold-time-ns = <432>;
                };
-               i2c2: i2c@FF122000 {
+               i2c2: i2c@ff122000 {
                        i2c-sda-hold-time-ns = <432>;
                };
-               i2c3: i2c@FF123000 {
+               i2c3: i2c@ff123000 {
                        i2c-sda-hold-time-ns = <432>;
                };
-               i2c4: i2c@FF124000 {
+               i2c4: i2c@ff124000 {
                        i2c-sda-hold-time-ns = <432>;
                };
 
index f9e7686044ebee0a4c49e1d4824810e49747256e..6a1615f58f052d3586dda6038e7b2ebc3a55d161 100644 (file)
@@ -38,7 +38,7 @@
                        clock-div = <6>;
                };
 
-               iomux: iomux@FF10601c {
+               iomux: iomux@ff10601c {
                        /* Port 1 */
                        pctl_tsin_s0: pctl-tsin-s0 {   /* Serial TS-in 0 */
                                abilis,function = "mis0";
                        };
                };
 
-               gpioa: gpio@FF140000 {
+               gpioa: gpio@ff140000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF140000 0x1000>;
+                       reg = <0xff140000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <3>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioa";
                };
-               gpiob: gpio@FF141000 {
+               gpiob: gpio@ff141000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF141000 0x1000>;
+                       reg = <0xff141000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <2>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiob";
                };
-               gpioc: gpio@FF142000 {
+               gpioc: gpio@ff142000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF142000 0x1000>;
+                       reg = <0xff142000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <3>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioc";
                };
-               gpiod: gpio@FF143000 {
+               gpiod: gpio@ff143000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF143000 0x1000>;
+                       reg = <0xff143000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <2>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiod";
                };
-               gpioe: gpio@FF144000 {
+               gpioe: gpio@ff144000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF144000 0x1000>;
+                       reg = <0xff144000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <3>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioe";
                };
-               gpiof: gpio@FF145000 {
+               gpiof: gpio@ff145000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF145000 0x1000>;
+                       reg = <0xff145000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <2>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiof";
                };
-               gpiog: gpio@FF146000 {
+               gpiog: gpio@ff146000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF146000 0x1000>;
+                       reg = <0xff146000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <3>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiog";
                };
-               gpioh: gpio@FF147000 {
+               gpioh: gpio@ff147000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF147000 0x1000>;
+                       reg = <0xff147000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <2>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioh";
                };
-               gpioi: gpio@FF148000 {
+               gpioi: gpio@ff148000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF148000 0x1000>;
+                       reg = <0xff148000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <12>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioi";
                };
-               gpioj: gpio@FF149000 {
+               gpioj: gpio@ff149000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF149000 0x1000>;
+                       reg = <0xff149000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <32>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpioj";
                };
-               gpiok: gpio@FF14a000 {
+               gpiok: gpio@ff14a000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF14A000 0x1000>;
+                       reg = <0xff14a000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <22>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiok";
                };
-               gpiol: gpio@FF14b000 {
+               gpiol: gpio@ff14b000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF14B000 0x1000>;
+                       reg = <0xff14b000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <4>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiol";
                };
-               gpiom: gpio@FF14c000 {
+               gpiom: gpio@ff14c000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF14C000 0x1000>;
+                       reg = <0xff14c000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <4>;
                        gpio-ranges = <&iomux 0 0 0>;
                        gpio-ranges-group-names = "gpiom";
                };
-               gpion: gpio@FF14d000 {
+               gpion: gpio@ff14d000 {
                        compatible = "abilis,tb10x-gpio";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <27 2>;
-                       reg = <0xFF14D000 0x1000>;
+                       reg = <0xff14d000 0x1000>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        abilis,ngpio = <5>;
index 37d88c5dd181fc5c0de3eb8bd7526d84e56f483c..05143ce9c120434a0d64ce892ed593dc038d8cf1 100644 (file)
        };
 
        soc100 {
-               uart@FF100000 {
+               uart@ff100000 {
                        pinctrl-names = "default";
                        pinctrl-0 = <&pctl_uart0>;
                };
-               ethernet@FE100000 {
+               ethernet@fe100000 {
                        phy-mode = "rgmii";
                };
 
-               i2c0: i2c@FF120000 {
+               i2c0: i2c@ff120000 {
                        i2c-sda-hold-time-ns = <432>;
                };
-               i2c1: i2c@FF121000 {
+               i2c1: i2c@ff121000 {
                        i2c-sda-hold-time-ns = <432>;
                };
-               i2c2: i2c@FF122000 {
+               i2c2: i2c@ff122000 {
                        i2c-sda-hold-time-ns = <432>;
                };
-               i2c3: i2c@FF123000 {
+               i2c3: i2c@ff123000 {
                        i2c-sda-hold-time-ns = <432>;
                };
-               i2c4: i2c@FF124000 {
+               i2c4: i2c@ff124000 {
                        i2c-sda-hold-time-ns = <432>;
                };
 
index 3121536b25a375883a5eca1b94a3e2a4db372680..2fbf1bdfe6de815f0865865338f225a897a204c5 100644 (file)
@@ -54,7 +54,7 @@
                #size-cells     = <1>;
                device_type     = "soc";
                ranges          = <0xfe000000 0xfe000000 0x02000000
-                               0x000F0000 0x000F0000 0x00010000>;
+                               0x000f0000 0x000f0000 0x00010000>;
                compatible      = "abilis,tb10x", "simple-bus";
 
                pll0: oscillator {
                        clock-output-names = "ahb_clk";
                };
 
-               iomux: iomux@FF10601c {
+               iomux: iomux@ff10601c {
                        compatible = "abilis,tb10x-iomux";
                        #gpio-range-cells = <3>;
-                       reg = <0xFF10601c 0x4>;
+                       reg = <0xff10601c 0x4>;
                };
 
                intc: interrupt-controller {
@@ -88,7 +88,7 @@
                };
                tb10x_ictl: pic@fe002000 {
                        compatible = "abilis,tb10x-ictl";
-                       reg = <0xFE002000 0x20>;
+                       reg = <0xfe002000 0x20>;
                        interrupt-controller;
                        #interrupt-cells = <2>;
                        interrupt-parent = <&intc>;
                                        20 21 22 23 24 25 26 27 28 29 30 31>;
                };
 
-               uart@FF100000 {
+               uart@ff100000 {
                        compatible = "snps,dw-apb-uart";
-                       reg = <0xFF100000 0x100>;
+                       reg = <0xff100000 0x100>;
                        clock-frequency = <166666666>;
                        interrupts = <25 8>;
                        reg-shift = <2>;
                        reg-io-width = <4>;
                        interrupt-parent = <&tb10x_ictl>;
                };
-               ethernet@FE100000 {
+               ethernet@fe100000 {
                        compatible = "snps,dwmac-3.70a","snps,dwmac";
-                       reg = <0xFE100000 0x1058>;
+                       reg = <0xfe100000 0x1058>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <6 8>;
                        interrupt-names = "macirq";
                        clocks = <&ahb_clk>;
                        clock-names = "stmmaceth";
                };
-               dma@FE000000 {
+               dma@fe000000 {
                        compatible = "snps,dma-spear1340";
-                       reg = <0xFE000000 0x400>;
+                       reg = <0xfe000000 0x400>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <14 8>;
                        dma-channels = <6>;
                        multi-block = <1 1 1 1 1 1>;
                };
 
-               i2c0: i2c@FF120000 {
+               i2c0: i2c@ff120000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "snps,designware-i2c";
-                       reg = <0xFF120000 0x1000>;
+                       reg = <0xff120000 0x1000>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <12 8>;
                        clocks = <&ahb_clk>;
                };
-               i2c1: i2c@FF121000 {
+               i2c1: i2c@ff121000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "snps,designware-i2c";
-                       reg = <0xFF121000 0x1000>;
+                       reg = <0xff121000 0x1000>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <12 8>;
                        clocks = <&ahb_clk>;
                };
-               i2c2: i2c@FF122000 {
+               i2c2: i2c@ff122000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "snps,designware-i2c";
-                       reg = <0xFF122000 0x1000>;
+                       reg = <0xff122000 0x1000>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <12 8>;
                        clocks = <&ahb_clk>;
                };
-               i2c3: i2c@FF123000 {
+               i2c3: i2c@ff123000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "snps,designware-i2c";
-                       reg = <0xFF123000 0x1000>;
+                       reg = <0xff123000 0x1000>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <12 8>;
                        clocks = <&ahb_clk>;
                };
-               i2c4: i2c@FF124000 {
+               i2c4: i2c@ff124000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "snps,designware-i2c";
-                       reg = <0xFF124000 0x1000>;
+                       reg = <0xff124000 0x1000>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <12 8>;
                        clocks = <&ahb_clk>;
                };
 
-               spi0: spi@0xFE010000 {
+               spi0: spi@fe010000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        cell-index = <0>;
                        compatible = "abilis,tb100-spi";
                        num-cs = <1>;
-                       reg = <0xFE010000 0x20>;
+                       reg = <0xfe010000 0x20>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <26 8>;
                        clocks = <&ahb_clk>;
                };
-               spi1: spi@0xFE011000 {
+               spi1: spi@fe011000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        cell-index = <1>;
                        compatible = "abilis,tb100-spi";
                        num-cs = <2>;
-                       reg = <0xFE011000 0x20>;
+                       reg = <0xfe011000 0x20>;
                        interrupt-parent = <&tb10x_ictl>;
                        interrupts = <10 8>;
                        clocks = <&ahb_clk>;
                        interrupts = <20 2>, <19 2>;
                        interrupt-names = "cmd_irq", "event_irq";
                };
-               tb10x_mdsc0: tb10x-mdscr@FF300000 {
+               tb10x_mdsc0: tb10x-mdscr@ff300000 {
                        compatible = "abilis,tb100-mdscr";
-                       reg = <0xFF300000 0x7000>;
+                       reg = <0xff300000 0x7000>;
                        tb100-mdscr-manage-tsin;
                };
-               tb10x_mscr0: tb10x-mdscr@FF307000 {
+               tb10x_mscr0: tb10x-mdscr@ff307000 {
                        compatible = "abilis,tb100-mdscr";
-                       reg = <0xFF307000 0x7000>;
+                       reg = <0xff307000 0x7000>;
                };
                tb10x_scr0: tb10x-mdscr@ff30e000 {
                        compatible = "abilis,tb100-mdscr";
-                       reg = <0xFF30e000 0x4000>;
+                       reg = <0xff30e000 0x4000>;
                        tb100-mdscr-manage-tsin;
                };
                tb10x_scr1: tb10x-mdscr@ff312000 {
                        compatible = "abilis,tb100-mdscr";
-                       reg = <0xFF312000 0x4000>;
+                       reg = <0xff312000 0x4000>;
                        tb100-mdscr-manage-tsin;
                };
                tb10x_wfb: tb10x-wfb@ff319000 {
index fdc266504ada273e6efaf72c18cc8c2e2f48edf2..37be3bf03ad632f75214f82ff984d9f781e68358 100644 (file)
@@ -41,7 +41,7 @@
                 * this GPIO block ORs all interrupts on CPU card (creg,..)
                 * to uplink only 1 IRQ to ARC core intc
                 */
-               dw-apb-gpio@0x2000 {
+               dw-apb-gpio@2000 {
                        compatible = "snps,dw-apb-gpio";
                        reg = < 0x2000 0x80 >;
                        #address-cells = <1>;
@@ -60,7 +60,7 @@
                        };
                };
 
-               debug_uart: dw-apb-uart@0x5000 {
+               debug_uart: dw-apb-uart@5000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x5000 0x100>;
                        clock-frequency = <33333000>;
@@ -88,7 +88,7 @@
         * avoid duplicating the MB dtsi file given that IRQ from
         * this intc to cpu intc are different for axs101 and axs103
         */
-       mb_intc: dw-apb-ictl@0xe0012000 {
+       mb_intc: dw-apb-ictl@e0012000 {
                #interrupt-cells = <1>;
                compatible = "snps,dw-apb-ictl";
                reg = < 0x0 0xe0012000 0x0 0x200 >;
index d75d65ddf8e31db78c58fa9882b90c2e6be2ed4b..effa37536d7ad3a02668e0455ed93220c245c2cb 100644 (file)
@@ -55,7 +55,7 @@
                 * this GPIO block ORs all interrupts on CPU card (creg,..)
                 * to uplink only 1 IRQ to ARC core intc
                 */
-               dw-apb-gpio@0x2000 {
+               dw-apb-gpio@2000 {
                        compatible = "snps,dw-apb-gpio";
                        reg = < 0x2000 0x80 >;
                        #address-cells = <1>;
@@ -74,7 +74,7 @@
                        };
                };
 
-               debug_uart: dw-apb-uart@0x5000 {
+               debug_uart: dw-apb-uart@5000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x5000 0x100>;
                        clock-frequency = <33333000>;
         * external DMA buffer located outside of IOC aperture.
         */
        axs10x_mb {
-               ethernet@0x18000 {
+               ethernet@18000 {
                        dma-coherent;
                };
 
-               ehci@0x40000 {
+               ehci@40000 {
                        dma-coherent;
                };
 
-               ohci@0x60000 {
+               ohci@60000 {
                        dma-coherent;
                };
 
-               mmc@0x15000 {
+               mmc@15000 {
                        dma-coherent;
                };
        };
         * avoid duplicating the MB dtsi file given that IRQ from
         * this intc to cpu intc are different for axs101 and axs103
         */
-       mb_intc: dw-apb-ictl@0xe0012000 {
+       mb_intc: dw-apb-ictl@e0012000 {
                #interrupt-cells = <1>;
                compatible = "snps,dw-apb-ictl";
                reg = < 0x0 0xe0012000 0x0 0x200 >;
                #size-cells = <2>;
                ranges;
                /*
-                * Move frame buffer out of IOC aperture (0x8z-0xAz).
+                * Move frame buffer out of IOC aperture (0x8z-0xaz).
                 */
                frame_buffer: frame_buffer@be000000 {
                        compatible = "shared-dma-pool";
index a05bb737ea6392f5e77cd3830dceb8afe620943e..e401e59f61802f2ef33fcb12854a7f97f896200d 100644 (file)
@@ -62,7 +62,7 @@
                 * this GPIO block ORs all interrupts on CPU card (creg,..)
                 * to uplink only 1 IRQ to ARC core intc
                 */
-               dw-apb-gpio@0x2000 {
+               dw-apb-gpio@2000 {
                        compatible = "snps,dw-apb-gpio";
                        reg = < 0x2000 0x80 >;
                        #address-cells = <1>;
@@ -81,7 +81,7 @@
                        };
                };
 
-               debug_uart: dw-apb-uart@0x5000 {
+               debug_uart: dw-apb-uart@5000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x5000 0x100>;
                        clock-frequency = <33333000>;
         * external DMA buffer located outside of IOC aperture.
         */
        axs10x_mb {
-               ethernet@0x18000 {
+               ethernet@18000 {
                        dma-coherent;
                };
 
-               ehci@0x40000 {
+               ehci@40000 {
                        dma-coherent;
                };
 
-               ohci@0x60000 {
+               ohci@60000 {
                        dma-coherent;
                };
 
-               mmc@0x15000 {
+               mmc@15000 {
                        dma-coherent;
                };
        };
         * avoid duplicating the MB dtsi file given that IRQ from
         * this intc to cpu intc are different for axs101 and axs103
         */
-       mb_intc: dw-apb-ictl@0xe0012000 {
+       mb_intc: dw-apb-ictl@e0012000 {
                #interrupt-cells = <1>;
                compatible = "snps,dw-apb-ictl";
                reg = < 0x0 0xe0012000 0x0 0x200 >;
                #size-cells = <2>;
                ranges;
                /*
-                * Move frame buffer out of IOC aperture (0x8z-0xAz).
+                * Move frame buffer out of IOC aperture (0x8z-0xaz).
                 */
                frame_buffer: frame_buffer@be000000 {
                        compatible = "shared-dma-pool";
index 37bafd44e36d0fed9b85e80ea356cd78df0c1872..4ead6dc9af2f7e3823b332ee9d7ed5df5b920d25 100644 (file)
@@ -72,7 +72,7 @@
                        };
                };
 
-               gmac: ethernet@0x18000 {
+               gmac: ethernet@18000 {
                        #interrupt-cells = <1>;
                        compatible = "snps,dwmac";
                        reg = < 0x18000 0x2000 >;
                        mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
                };
 
-               ehci@0x40000 {
+               ehci@40000 {
                        compatible = "generic-ehci";
                        reg = < 0x40000 0x100 >;
                        interrupts = < 8 >;
                };
 
-               ohci@0x60000 {
+               ohci@60000 {
                        compatible = "generic-ohci";
                        reg = < 0x60000 0x100 >;
                        interrupts = < 8 >;
                 * dw_mci_pltfm_prepare_command() is used in generic platform
                 * code.
                 */
-               mmc@0x15000 {
+               mmc@15000 {
                        compatible = "altr,socfpga-dw-mshc";
                        reg = < 0x15000 0x400 >;
                        fifo-depth = < 16 >;
                        bus-width = < 4 >;
                };
 
-               uart@0x20000 {
+               uart@20000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x20000 0x100>;
                        clock-frequency = <33333333>;
                        reg-io-width = <4>;
                };
 
-               uart@0x21000 {
+               uart@21000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x21000 0x100>;
                        clock-frequency = <33333333>;
                };
 
                /* UART muxed with USB data port (ttyS3) */
-               uart@0x22000 {
+               uart@22000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x22000 0x100>;
                        clock-frequency = <33333333>;
                        reg-io-width = <4>;
                };
 
-               i2c@0x1d000 {
+               i2c@1d000 {
                        compatible = "snps,designware-i2c";
                        reg = <0x1d000 0x100>;
                        clock-frequency = <400000>;
                        #sound-dai-cells = <0>;
                };
 
-               i2c@0x1f000 {
+               i2c@1f000 {
                        compatible = "snps,designware-i2c";
                        #address-cells = <1>;
                        #size-cells = <0>;
                                };
                        };
 
-                       eeprom@0x54{
+                       eeprom@54{
                                compatible = "atmel,24c01";
                                reg = <0x54>;
                                pagesize = <0x8>;
                        };
 
-                       eeprom@0x57{
+                       eeprom@57{
                                compatible = "atmel,24c04";
                                reg = <0x57>;
                                pagesize = <0x8>;
index 43f17b51ee89cca00a0b2eebb7ed045d49de03a0..7425bb0f2d1b6a8942086bce942f86108c3ddd80 100644 (file)
@@ -18,8 +18,8 @@
        model = "snps,hsdk";
        compatible = "snps,hsdk";
 
-       #address-cells = <1>;
-       #size-cells = <1>;
+       #address-cells = <2>;
+       #size-cells = <2>;
 
        chosen {
                bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
                #size-cells = <1>;
                interrupt-parent = <&idu_intc>;
 
-               ranges = <0x00000000 0xf0000000 0x10000000>;
+               ranges = <0x00000000 0x0 0xf0000000 0x10000000>;
 
                cgu_rst: reset-controller@8a0 {
                        compatible = "snps,hsdk-reset";
                        #reset-cells = <1>;
-                       reg = <0x8A0 0x4>, <0xFF0 0x4>;
+                       reg = <0x8a0 0x4>, <0xff0 0x4>;
                };
 
                core_clk: core-clk@0 {
                        compatible = "snps,hsdk-core-pll-clock";
-                       reg = <0x00 0x10>, <0x14B8 0x4>;
+                       reg = <0x00 0x10>, <0x14b8 0x4>;
                        #clock-cells = <0>;
                        clocks = <&input_clk>;
 
                        #clock-cells = <0>;
                };
 
+               dmac_core_clk: dmac-core-clk {
+                       compatible = "fixed-clock";
+                       clock-frequency = <400000000>;
+                       #clock-cells = <0>;
+               };
+
+               dmac_cfg_clk: dmac-gpu-cfg-clk {
+                       compatible = "fixed-clock";
+                       clock-frequency = <200000000>;
+                       #clock-cells = <0>;
+               };
+
                gmac: ethernet@8000 {
                        #interrupt-cells = <1>;
                        compatible = "snps,dwmac";
                        compatible = "snps,hsdk-v1.0-ohci", "generic-ohci";
                        reg = <0x60000 0x100>;
                        interrupts = <15>;
+                       resets = <&cgu_rst HSDK_USB_RESET>;
                        dma-coherent;
                };
 
                        compatible = "snps,hsdk-v1.0-ehci", "generic-ehci";
                        reg = <0x40000 0x100>;
                        interrupts = <15>;
+                       resets = <&cgu_rst HSDK_USB_RESET>;
                        dma-coherent;
                };
 
                                reg = <0>;
                        };
                };
+
+               dmac: dmac@80000 {
+                       compatible = "snps,axi-dma-1.01a";
+                       reg = <0x80000 0x400>;
+                       interrupts = <27>;
+                       clocks = <&dmac_core_clk>, <&dmac_cfg_clk>;
+                       clock-names = "core-clk", "cfgr-clk";
+
+                       dma-channels = <4>;
+                       snps,dma-masters = <2>;
+                       snps,data-width = <3>;
+                       snps,block-size = <4096 4096 4096 4096>;
+                       snps,priority = <0 1 2 3>;
+                       snps,axi-max-burst-len = <16>;
+               };
        };
 
        memory@80000000 {
-               #address-cells = <1>;
-               #size-cells = <1>;
+               #address-cells = <2>;
+               #size-cells = <2>;
                device_type = "memory";
-               reg = <0x80000000 0x40000000>;  /* 1 GiB */
+               reg = <0x0 0x80000000 0x0 0x40000000>;  /* 1 GB lowmem */
+               /*     0x1 0x00000000 0x0 0x40000000>;     1 GB highmem */
        };
 };
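Switching the root and the memory node to #address-cells = <2> / #size-cells = <2> above is what allows the commented-out highmem bank to sit at 0x1_0000_0000, beyond the 32-bit boundary. Device tree cells are 32-bit values stored most-significant cell first, so two cells form one 64-bit number; a small self-contained C sketch of that encoding (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* With two address cells, <0x1 0x00000000> encodes the base 0x100000000. */
static uint64_t dt_two_cells(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* reg = <0x0 0x80000000 0x0 0x40000000>: 1 GB of lowmem at 2 GB */
	printf("base=%#llx size=%#llx\n",
	       (unsigned long long)dt_two_cells(0x0, 0x80000000),
	       (unsigned long long)dt_two_cells(0x0, 0x40000000));
	return 0;
}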
index 0fd6ba985b164b7752c26544e5d4b6d9684c88be..84e8766c8ca2c6144167ea48e969145e6bebfe71 100644 (file)
@@ -36,7 +36,7 @@
                        #interrupt-cells = <1>;
                };
 
-               debug_uart: dw-apb-uart@0x5000 {
+               debug_uart: dw-apb-uart@5000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x5000 0x100>;
                        clock-frequency = <2403200>;
@@ -49,7 +49,7 @@
 
        };
 
-       mb_intc: dw-apb-ictl@0xe0012000 {
+       mb_intc: dw-apb-ictl@e0012000 {
                #interrupt-cells = <1>;
                compatible = "snps,dw-apb-ictl";
                reg = < 0xe0012000 0x200 >;
index 28956f9a9f3db7e12042821ea72d5a4af0514e2a..eb7e705e8a2789722a449e5abf5d5ad122619f99 100644 (file)
@@ -44,7 +44,7 @@
                        #interrupt-cells = <1>;
                };
 
-               debug_uart: dw-apb-uart@0x5000 {
+               debug_uart: dw-apb-uart@5000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x5000 0x100>;
                        clock-frequency = <2403200>;
@@ -57,7 +57,7 @@
 
        };
 
-       mb_intc: dw-apb-ictl@0xe0012000 {
+       mb_intc: dw-apb-ictl@e0012000 {
                #interrupt-cells = <1>;
                compatible = "snps,dw-apb-ictl";
                reg = < 0xe0012000 0x200 >;
index 48bb4b4cd234ed65a8b4002dbf060f3bc09b7e0d..925d5cc95dbbbf1419b920d4d3e73743ec794dd4 100644 (file)
@@ -36,7 +36,7 @@
                        };
                };
 
-               ethernet@0x18000 {
+               ethernet@18000 {
                        #interrupt-cells = <1>;
                        compatible = "snps,dwmac";
                        reg = < 0x18000 0x2000 >;
                        clock-names = "stmmaceth";
                };
 
-               ehci@0x40000 {
+               ehci@40000 {
                        compatible = "generic-ehci";
                        reg = < 0x40000 0x100 >;
                        interrupts = < 8 >;
                };
 
-               uart@0x20000 {
+               uart@20000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x20000 0x100>;
                        clock-frequency = <2403200>;
@@ -65,7 +65,7 @@
                        reg-io-width = <4>;
                };
 
-               uart@0x21000 {
+               uart@21000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x21000 0x100>;
                        clock-frequency = <2403200>;
@@ -75,7 +75,7 @@
                        reg-io-width = <4>;
                };
 
-               uart@0x22000 {
+               uart@22000 {
                        compatible = "snps,dw-apb-uart";
                        reg = <0x22000 0x100>;
                        clock-frequency = <2403200>;
                        interrupt-names = "arc_ps2_irq";
                };
 
-               mmc@0x15000 {
+               mmc@15000 {
                        compatible = "snps,dw-mshc";
                        reg = <0x15000 0x400>;
                        fifo-depth = <1024>;
         * Embedded Vision subsystem UIO mappings; only relevant for EV VDK
         *
         * This node is intentionally put outside of MB above because
-        * it maps areas outside of MB's 0xEz-0xFz.
+        * it maps areas outside of MB's 0xez-0xfz.
         */
-       uio_ev: uio@0xD0000000 {
+       uio_ev: uio@d0000000 {
                compatible = "generic-uio";
-               reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>;
+               reg = <0xd0000000 0x2000 0xd1000000 0x2000 0x90000000 0x10000000 0xc0000000 0x10000000>;
                reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem";
                interrupt-parent = <&mb_intc>;
                interrupts = <23>;
index 6fd3d29546afd2e2f76f40e5a0af57757a5c369a..0e5fd29ed238b5a4dc715a364bde2d9ee88edb58 100644 (file)
@@ -8,6 +8,7 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_RAM=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index b41f8881ecc811f2005b763958e779a42da15c36..decc306a3b52c2b96c2e7af7108e7aa171c51c31 100644 (file)
@@ -11,6 +11,7 @@ generic-y += hardirq.h
 generic-y += hw_irq.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
index a27eafdc82602f6856b00c5fbd82db72c33cf238..a7d4be87b2f0a8440307f87b00729839d1c9ab3e 100644 (file)
@@ -82,6 +82,7 @@
 #define ECR_V_DTLB_MISS                        0x05
 #define ECR_V_PROTV                    0x06
 #define ECR_V_TRAP                     0x09
+#define ECR_V_MISALIGN                 0x0d
 #endif
 
 /* DTLB Miss and Protection Violation Cause Codes */
@@ -167,14 +168,6 @@ struct bcr_mpy {
 #endif
 };
 
-struct bcr_extn_xymem {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-       unsigned int ram_org:2, num_banks:4, bank_sz:4, ver:8;
-#else
-       unsigned int ver:8, bank_sz:4, num_banks:4, ram_org:2;
-#endif
-};
-
 struct bcr_iccm_arcompact {
 #ifdef CONFIG_CPU_BIG_ENDIAN
        unsigned int base:16, pad:5, sz:3, ver:8;
@@ -312,7 +305,7 @@ struct cpuinfo_arc {
        struct cpuinfo_arc_bpu bpu;
        struct bcr_identity core;
        struct bcr_isa_arcv2 isa;
-       const char *details, *name;
+       const char *release, *name;
        unsigned int vec_base;
        struct cpuinfo_arc_ccm iccm, dccm;
        struct {
@@ -322,7 +315,6 @@ struct cpuinfo_arc {
                             timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
        } extn;
        struct bcr_mpy extn_mpy;
-       struct bcr_extn_xymem extn_xymem;
 };
 
 extern struct cpuinfo_arc cpuinfo_arc700[];
index 8a4f77ea3238e6f017ae24ab6b55e2952637fe04..e66d0339e1d8617ac30050f53d26d926243266e6 100644 (file)
 #define ARCV2_IRQ_DEF_PRIO     1
 
 /* seed value for status register */
-#define ISA_INIT_STATUS_BITS   (STATUS_IE_MASK | STATUS_AD_MASK | \
+#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+#define __AD_ENB       STATUS_AD_MASK
+#else
+#define __AD_ENB       0
+#endif
+
+#define ISA_INIT_STATUS_BITS   (STATUS_IE_MASK | __AD_ENB | \
                                        (ARCV2_IRQ_DEF_PRIO << 1))
 
 #ifndef __ASSEMBLY__
index 6958545390f0f847ed3a7745b7325964d7f23f17..9cd7ee4fad390e7806a812b715d2ac90bbe0e56f 100644 (file)
@@ -105,10 +105,10 @@ static const char * const arc_pmu_ev_hw_map[] = {
        [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
        /* All jump instructions that are taken */
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
-       [PERF_COUNT_ARC_BPOK]         = "bpok",   /* NP-NT, PT-T, PNT-NT */
 #ifdef CONFIG_ISA_ARCV2
        [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
 #else
+       [PERF_COUNT_ARC_BPOK]         = "bpok",   /* NP-NT, PT-T, PNT-NT */
        [PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */
 #endif
        [PERF_COUNT_ARC_LDC] = "imemrdc",       /* Instr: mem read cached */
index 2ba04a7db62128148ac303e79e95c2cb2ee2d534..daa914da796886de6a3ae3744e3428d30f3804c7 100644 (file)
@@ -21,8 +21,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
        unsigned int val;
 
-       smp_mb();
-
        __asm__ __volatile__(
        "1:     llock   %[val], [%[slock]]      \n"
        "       breq    %[val], %[LOCKED], 1b   \n"     /* spin while LOCKED */
@@ -34,6 +32,14 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
          [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");
 
+       /*
+        * ACQUIRE barrier to ensure load/store after taking the lock
+        * don't "bleed-up" out of the critical section (leak-in is allowed)
+        * http://www.spinics.net/lists/kernel/msg2010409.html
+        *
+        * ARCv2 only has load-load, store-store and all-all barrier
+        * thus need the full all-all barrier
+        */
        smp_mb();
 }
 
@@ -42,8 +48,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
        unsigned int val, got_it = 0;
 
-       smp_mb();
-
        __asm__ __volatile__(
        "1:     llock   %[val], [%[slock]]      \n"
        "       breq    %[val], %[LOCKED], 4f   \n"     /* already LOCKED, just bail */
@@ -67,9 +71,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
        smp_mb();
 
-       lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
-       smp_mb();
+       WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
 }
 
 /*
@@ -81,8 +83,6 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 {
        unsigned int val;
 
-       smp_mb();
-
        /*
         * zero means writer holds the lock exclusively, deny Reader.
         * Otherwise grant lock to first/subseq reader
@@ -113,8 +113,6 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
        unsigned int val, got_it = 0;
 
-       smp_mb();
-
        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brls    %[val], %[WR_LOCKED], 4f\n"     /* <= 0: already write locked, bail */
@@ -140,8 +138,6 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 {
        unsigned int val;
 
-       smp_mb();
-
        /*
         * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
         * deny writer. Otherwise if unlocked grant to writer
@@ -175,8 +171,6 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
        unsigned int val, got_it = 0;
 
-       smp_mb();
-
        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brne    %[val], %[UNLOCKED], 4f \n"     /* !UNLOCKED, bail */
@@ -217,17 +211,13 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter))
        : "memory", "cc");
-
-       smp_mb();
 }
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
        smp_mb();
 
-       rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
-
-       smp_mb();
+       WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
 }
 
 #else  /* !CONFIG_ARC_HAS_LLSC */
@@ -237,10 +227,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
        unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
 
        /*
-        * This smp_mb() is technically superfluous, we only need the one
-        * after the lock for providing the ACQUIRE semantics.
-        * However doing the "right" thing was regressing hackbench
-        * so keeping this, pending further investigation
+        * Per lkmm, smp_mb() is only required after _lock (and before _unlock)
+        * for ACQ and REL semantics respectively. However EX based spinlocks
+        * need the extra smp_mb to workaround a hardware quirk.
         */
        smp_mb();
 
@@ -257,14 +246,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 #endif
        : "memory");
 
-       /*
-        * ACQUIRE barrier to ensure load/store after taking the lock
-        * don't "bleed-up" out of the critical section (leak-in is allowed)
-        * http://www.spinics.net/lists/kernel/msg2010409.html
-        *
-        * ARCv2 only has load-load, store-store and all-all barrier
-        * thus need the full all-all barrier
-        */
        smp_mb();
 }
 
@@ -309,8 +290,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
        : "memory");
 
        /*
-        * superfluous, but keeping for now - see pairing version in
-        * arch_spin_lock above
+        * see pairing version/comment in arch_spin_lock above
         */
        smp_mb();
 }
@@ -344,7 +324,6 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
 
-       smp_mb();
        return ret;
 }
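The barrier shuffle above implements the acquire/release rule spelled out in the new comments: a full smp_mb() is required only after taking the lock (ACQUIRE) and before releasing it (RELEASE), so the barriers before the lock attempt and after the unlocking store can go. A minimal sketch of the same discipline using the GCC/Clang __atomic builtins; the toy_* names are illustrative, not the kernel's API:

typedef struct { int slock; } toy_spinlock_t;

static inline void toy_spin_lock(toy_spinlock_t *l)
{
	/* spin until we swap 0 -> 1; ACQUIRE keeps the critical section
	 * from bleeding up past the lock (leak-in is still allowed) */
	while (__atomic_exchange_n(&l->slock, 1, __ATOMIC_ACQUIRE))
		;
}

static inline void toy_spin_unlock(toy_spinlock_t *l)
{
	/* RELEASE keeps the critical section before the unlocking store;
	 * nothing is needed after it, matching the WRITE_ONCE() above */
	__atomic_store_n(&l->slock, 0, __ATOMIC_RELEASE);
}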
 
index 29de098043064a20112dd7ffd02f86059261ae44..c7a4201ed62ba70f9f37275475be215b0e7fb6d1 100644 (file)
@@ -55,12 +55,11 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
  */
 static inline void
 syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-                     unsigned int i, unsigned int n, unsigned long *args)
+                     unsigned long *args)
 {
        unsigned long *inside_ptregs = &(regs->r0);
-       inside_ptregs -= i;
-
-       BUG_ON((i + n) > 6);
+       unsigned int n = 6;
+       unsigned int i = 0;
 
        while (n--) {
                args[i++] = (*inside_ptregs);
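This hunk appears to track the recent tree-wide simplification of syscall_get_arguments(): the i/n parameters are gone and the helper now unconditionally copies all six syscall arguments. A hedged sketch of the resulting caller contract (kernel context assumed; task and regs stand for a traced task and its register set):

	unsigned long args[6];

	/* old contract: syscall_get_arguments(task, regs, 0, 6, args);
	 * new contract: the callee always fills args[0..5] */
	syscall_get_arguments(task, regs, args);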
index a9db5f62aaf37988fe8806ac4ee0a14edf713309..90cac97643a46949fd00f3d2678eebb4a10c525b 100644 (file)
@@ -9,38 +9,6 @@
 #ifndef _ASM_ARC_TLB_H
 #define _ASM_ARC_TLB_H
 
-#define tlb_flush(tlb)                         \
-do {                                           \
-       if (tlb->fullmm)                        \
-               flush_tlb_mm((tlb)->mm);        \
-} while (0)
-
-/*
- * This pair is called at time of munmap/exit to flush cache and TLB entries
- * for mappings being torn down.
- * 1) cache-flush part -implemented via tlb_start_vma( ) for VIPT aliasing D$
- * 2) tlb-flush part - implemted via tlb_end_vma( ) flushes the TLB range
- *
- * Note, read http://lkml.org/lkml/2004/1/15/6
- */
-#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
-#define tlb_start_vma(tlb, vma)
-#else
-#define tlb_start_vma(tlb, vma)                                                \
-do {                                                                   \
-       if (!tlb->fullmm)                                               \
-               flush_cache_range(vma, vma->vm_start, vma->vm_end);     \
-} while(0)
-#endif
-
-#define tlb_end_vma(tlb, vma)                                          \
-do {                                                                   \
-       if (!tlb->fullmm)                                               \
-               flush_tlb_range(vma, vma->vm_start, vma->vm_end);       \
-} while (0)
-
-#define __tlb_remove_tlb_entry(tlb, ptep, address)
-
 #include <linux/pagemap.h>
 #include <asm-generic/tlb.h>
 
index 755bb11323d8feb92d12349bf444f151cd8acc29..1c72f04ff75da1a7f6918f00b14116a183a79313 100644 (file)
@@ -1,2 +1 @@
-generic-y += kvm_para.h
 generic-y += ucontext.h
index 30e090625916160acb23df6bfa44e86bcad7192f..8f6e0447dd1702b571b23a3f561f8ed032ae6abf 100644 (file)
        ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
        ; by default
        lr      r5, [status32]
+#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
        bset    r5, r5, STATUS_AD_BIT
+#else
+       ; Although disabled at reset, bootloader might have enabled it
+       bclr    r5, r5, STATUS_AD_BIT
+#endif
        kflag   r5
 #endif
 .endm
@@ -106,6 +111,7 @@ ENTRY(stext)
        ;    r2 = pointer to uboot provided cmdline or external DTB in mem
        ; These are handled later in handle_uboot_args()
        st      r0, [@uboot_tag]
+       st      r1, [@uboot_magic]
        st      r2, [@uboot_arg]
 
        ; setup "current" tsk and optionally cache it in dedicated r25
index cf18b3e5a934d34c684edcc7aa84533a10f932bf..c0d0124de089b4a456a98c20c5ab6f78913ab36d 100644 (file)
@@ -95,7 +95,7 @@ void arc_init_IRQ(void)
 
        /* setup status32, don't enable intr yet as kernel doesn't want */
        tmp = read_aux_reg(ARC_REG_STATUS32);
-       tmp |= STATUS_AD_MASK | (ARCV2_IRQ_DEF_PRIO << 1);
+       tmp |= ARCV2_IRQ_DEF_PRIO << 1;
        tmp &= ~STATUS_IE_MASK;
        asm volatile("kflag %0  \n"::"r"(tmp));
 }
index 7b2340996cf80fc4ddc382c55d86acbe37d49bf5..a9c88b7e9182f6232b3f319a00865c8c9946b38f 100644 (file)
@@ -36,6 +36,7 @@ unsigned int intr_to_DE_cnt;
 
 /* Part of U-boot ABI: see head.S */
 int __initdata uboot_tag;
+int __initdata uboot_magic;
 char __initdata *uboot_arg;
 
 const struct machine_desc *machine_desc;
@@ -44,29 +45,24 @@ struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
 
 struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
 
-static const struct id_to_str arc_cpu_rel[] = {
+static const struct id_to_str arc_legacy_rel[] = {
+       /* ID.ARCVER,   Release */
 #ifdef CONFIG_ISA_ARCOMPACT
-       { 0x34, "R4.10"},
-       { 0x35, "R4.11"},
+       { 0x34,         "R4.10"},
+       { 0x35,         "R4.11"},
 #else
-       { 0x51, "R2.0" },
-       { 0x52, "R2.1" },
-       { 0x53, "R3.0" },
-       { 0x54, "R3.10a" },
+       { 0x51,         "R2.0" },
+       { 0x52,         "R2.1" },
+       { 0x53,         "R3.0" },
 #endif
-       { 0x00, NULL   }
+       { 0x00,         NULL   }
 };
 
-static const struct id_to_str arc_cpu_nm[] = {
-#ifdef CONFIG_ISA_ARCOMPACT
-       { 0x20, "ARC 600"   },
-       { 0x30, "ARC 770"   },  /* 750 identified seperately */
-#else
-       { 0x40, "ARC EM"  },
-       { 0x50, "ARC HS38"  },
-       { 0x54, "ARC HS48"  },
-#endif
-       { 0x00, "Unknown"   }
+static const struct id_to_str arc_cpu_rel[] = {
+       /* UARCH.MAJOR, Release */
+       {  0,           "R3.10a"},
+       {  1,           "R3.50a"},
+       {  0xFF,        NULL   }
 };
 
 static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
@@ -116,31 +112,72 @@ static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
        }
 }
 
+static void decode_arc_core(struct cpuinfo_arc *cpu)
+{
+       struct bcr_uarch_build_arcv2 uarch;
+       const struct id_to_str *tbl;
+
+       /*
+        * Up until (including) the first core4 release (0x54) things were
+        * simple: AUX IDENTITY.ARCVER was sufficient to identify arc family
+        * and release: 0x50 to 0x53 was HS38, 0x54 was HS48 (dual issue)
+        */
+
+       if (cpu->core.family < 0x54) { /* includes arc700 */
+
+               for (tbl = &arc_legacy_rel[0]; tbl->id != 0; tbl++) {
+                       if (cpu->core.family == tbl->id) {
+                               cpu->release = tbl->str;
+                               break;
+                       }
+               }
+
+               if (is_isa_arcompact())
+                       cpu->name = "ARC700";
+               else if (tbl->str)
+                       cpu->name = "HS38";
+               else
+                       cpu->name = cpu->release = "Unknown";
+
+               return;
+       }
+
+       /*
+        * However the subsequent HS release (same 0x54) allow HS38 or HS48
+        * configurations and encode this info in a different BCR.
+        * The BCR was introduced in 0x54 so can't be read unconditionally.
+        */
+
+       READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
+
+       if (uarch.prod == 4) {
+               cpu->name = "HS48";
+               cpu->extn.dual = 1;
+
+       } else {
+               cpu->name = "HS38";
+       }
+
+       for (tbl = &arc_cpu_rel[0]; tbl->id != 0xFF; tbl++) {
+               if (uarch.maj == tbl->id) {
+                       cpu->release = tbl->str;
+                       break;
+               }
+       }
+}
+
 static void read_arc_build_cfg_regs(void)
 {
        struct bcr_timer timer;
        struct bcr_generic bcr;
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
-       const struct id_to_str *tbl;
        struct bcr_isa_arcv2 isa;
        struct bcr_actionpoint ap;
 
        FIX_PTR(cpu);
 
        READ_BCR(AUX_IDENTITY, cpu->core);
-
-       for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
-               if (cpu->core.family == tbl->id) {
-                       cpu->details = tbl->str;
-                       break;
-               }
-       }
-
-       for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
-               if ((cpu->core.family & 0xF4) == tbl->id)
-                       break;
-       }
-       cpu->name = tbl->str;
+       decode_arc_core(cpu);
 
        READ_BCR(ARC_REG_TIMERS_BCR, timer);
        cpu->extn.timer0 = timer.t0;
@@ -151,16 +188,6 @@ static void read_arc_build_cfg_regs(void)
 
        READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
 
-       cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR) > 1 ? 1 : 0; /* 2,3 */
-       cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR) > 1 ? 1 : 0; /* 2,3 */
-       cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0;        /* 1,3 */
-       cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
-       cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
-       cpu->extn.swape = (cpu->core.family >= 0x34) ? 1 :
-                               IS_ENABLED(CONFIG_ARC_HAS_SWAPE);
-
-       READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
-
        /* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
        read_decode_ccm_bcr(cpu);
 
@@ -198,30 +225,12 @@ static void read_arc_build_cfg_regs(void)
                cpu->bpu.num_pred = 2048 << bpu.pte;
                cpu->bpu.ret_stk = 4 << bpu.rse;
 
-               if (cpu->core.family >= 0x54) {
-
-                       struct bcr_uarch_build_arcv2 uarch;
-
-                       /*
-                        * The first 0x54 core (uarch maj:min 0:1 or 0:2) was
-                        * dual issue only (HS4x). But next uarch rev (1:0)
-                        * allows it be configured for single issue (HS3x)
-                        * Ensure we fiddle with dual issue only on HS4x
-                        */
-                       READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
-
-                       if (uarch.prod == 4) {
-                               unsigned int exec_ctrl;
-
-                               /* dual issue hardware always present */
-                               cpu->extn.dual = 1;
-
-                               READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
+               /* if dual issue hardware, is it enabled ? */
+               if (cpu->extn.dual) {
+                       unsigned int exec_ctrl;
 
-                               /* dual issue hardware enabled ? */
-                               cpu->extn.dual_enb = !(exec_ctrl & 1);
-
-                       }
+                       READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
+                       cpu->extn.dual_enb = !(exec_ctrl & 1);
                }
        }
 
@@ -263,7 +272,8 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 {
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
        struct bcr_identity *core = &cpu->core;
-       int i, n = 0, ua = 0;
+       char mpy_opt[16];
+       int n = 0;
 
        FIX_PTR(cpu);
 
@@ -272,7 +282,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
                       core->family, core->cpu_id, core->chip_id);
 
        n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
-                      cpu_id, cpu->name, cpu->details,
+                      cpu_id, cpu->name, cpu->release,
                       is_isa_arcompact() ? "ARCompact" : "ARCv2",
                       IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
                       IS_AVAIL3(cpu->extn.dual, cpu->extn.dual_enb, " Dual-Issue "));
@@ -283,61 +293,50 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
                       IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
                       IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT));
 
-#ifdef __ARC_UNALIGNED__
-       ua = 1;
-#endif
-       n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s%s",
-                          IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
-                          IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
-                          IS_AVAIL1(cpu->isa.unalign, "unalign "), IS_USED_RUN(ua));
-
-       if (i)
-               n += scnprintf(buf + n, len - n, "\n\t\t: ");
-
        if (cpu->extn_mpy.ver) {
-               if (cpu->extn_mpy.ver <= 0x2) { /* ARCompact */
-                       n += scnprintf(buf + n, len - n, "mpy ");
+               if (is_isa_arcompact()) {
+                       scnprintf(mpy_opt, 16, "mpy");
                } else {
+
                        int opt = 2;    /* stock MPY/MPYH */
 
                        if (cpu->extn_mpy.dsp)  /* OPT 7-9 */
                                opt = cpu->extn_mpy.dsp + 6;
 
-                       n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt);
+                       scnprintf(mpy_opt, 16, "mpy[opt %d] ", opt);
                }
        }
 
        n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
-                      IS_AVAIL1(cpu->isa.div_rem, "div_rem "),
-                      IS_AVAIL1(cpu->extn.norm, "norm "),
-                      IS_AVAIL1(cpu->extn.barrel, "barrel-shift "),
-                      IS_AVAIL1(cpu->extn.swap, "swap "),
-                      IS_AVAIL1(cpu->extn.minmax, "minmax "),
-                      IS_AVAIL1(cpu->extn.crc, "crc "),
-                      IS_AVAIL2(cpu->extn.swape, "swape", CONFIG_ARC_HAS_SWAPE));
-
-       if (cpu->bpu.ver)
+                      IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
+                      IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
+                      IS_AVAIL2(cpu->isa.unalign, "unalign ", CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS),
+                      IS_AVAIL1(cpu->extn_mpy.ver, mpy_opt),
+                      IS_AVAIL1(cpu->isa.div_rem, "div_rem "));
+
+       if (cpu->bpu.ver) {
                n += scnprintf(buf + n, len - n,
                              "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d",
                              IS_AVAIL1(cpu->bpu.full, "full"),
                              IS_AVAIL1(!cpu->bpu.full, "partial"),
                              cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk);
 
-       if (is_isa_arcv2()) {
-               struct bcr_lpb lpb;
+               if (is_isa_arcv2()) {
+                       struct bcr_lpb lpb;
 
-               READ_BCR(ARC_REG_LPB_BUILD, lpb);
-               if (lpb.ver) {
-                       unsigned int ctl;
-                       ctl = read_aux_reg(ARC_REG_LPB_CTRL);
+                       READ_BCR(ARC_REG_LPB_BUILD, lpb);
+                       if (lpb.ver) {
+                               unsigned int ctl;
+                               ctl = read_aux_reg(ARC_REG_LPB_CTRL);
 
-                       n += scnprintf(buf + n, len - n, " Loop Buffer:%d %s",
-                               lpb.entries,
-                               IS_DISABLED_RUN(!ctl));
+                               n += scnprintf(buf + n, len - n, " Loop Buffer:%d %s",
+                                              lpb.entries,
+                                              IS_DISABLED_RUN(!ctl));
+                       }
                }
+               n += scnprintf(buf + n, len - n, "\n");
        }
 
-       n += scnprintf(buf + n, len - n, "\n");
        return buf;
 }
 
@@ -390,11 +389,6 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
                }
        }
 
-       n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
-                       EF_ARC_OSABI_CURRENT >> 8,
-                       EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
-                       "no-legacy-syscalls" : "64-bit data any register aligned");
-
        return buf;
 }
 
@@ -497,6 +491,8 @@ static inline bool uboot_arg_invalid(unsigned long addr)
 #define UBOOT_TAG_NONE         0
 #define UBOOT_TAG_CMDLINE      1
 #define UBOOT_TAG_DTB          2
+/* We always pass 0 as magic from U-boot */
+#define UBOOT_MAGIC_VALUE      0
 
 void __init handle_uboot_args(void)
 {
@@ -511,6 +507,11 @@ void __init handle_uboot_args(void)
                goto ignore_uboot_args;
        }
 
+       if (uboot_magic != UBOOT_MAGIC_VALUE) {
+               pr_warn(IGNORE_ARGS "non zero uboot magic\n");
+               goto ignore_uboot_args;
+       }
+
        if (uboot_tag != UBOOT_TAG_NONE &&
             uboot_arg_invalid((unsigned long)uboot_arg)) {
                pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
index 215f515442e03d53ee3a18ade4c62e2a06987b3b..b0aa8c02833137c42a95d2c48a187f45fc1fc479 100644 (file)
@@ -145,7 +145,8 @@ static void show_ecr_verbose(struct pt_regs *regs)
        } else if (vec == ECR_V_PROTV) {
                if (cause_code == ECR_C_PROTV_INST_FETCH)
                        pr_cont("Execute from Non-exec Page\n");
-               else if (cause_code == ECR_C_PROTV_MISALIG_DATA)
+               else if (cause_code == ECR_C_PROTV_MISALIG_DATA &&
+                        IS_ENABLED(CONFIG_ISA_ARCOMPACT))
                        pr_cont("Misaligned r/w from 0x%08lx\n", address);
                else
                        pr_cont("%s access not allowed on page\n",
@@ -161,6 +162,8 @@ static void show_ecr_verbose(struct pt_regs *regs)
                        pr_cont("Bus Error from Data Mem\n");
                else
                        pr_cont("Bus Error, check PRM\n");
+       } else if (vec == ECR_V_MISALIGN) {
+               pr_cont("Misaligned r/w from 0x%08lx\n", address);
 #endif
        } else if (vec == ECR_V_TRAP) {
                if (regs->ecr_param == 5)
index b1656d15609750910512c9e00799c8d736f665b2..f7537b466b23dea34ca3e5772239f96f206a9124 100644 (file)
@@ -8,4 +8,10 @@
 lib-y  := strchr-700.o strcpy-700.o strlen.o memcmp.o
 
 lib-$(CONFIG_ISA_ARCOMPACT)    += memcpy-700.o memset.o strcmp.o
-lib-$(CONFIG_ISA_ARCV2)                += memcpy-archs.o memset-archs.o strcmp-archs.o
+lib-$(CONFIG_ISA_ARCV2)                += memset-archs.o strcmp-archs.o
+
+ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+lib-$(CONFIG_ISA_ARCV2)               += memcpy-archs-unaligned.o
+else
+lib-$(CONFIG_ISA_ARCV2)               += memcpy-archs.o
+endif
diff --git a/arch/arc/lib/memcpy-archs-unaligned.S b/arch/arc/lib/memcpy-archs-unaligned.S
new file mode 100644 (file)
index 0000000..28993a7
--- /dev/null
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ARCv2 memcpy implementation optimized for unaligned memory access.
+ *
+ * Copyright (C) 2019 Synopsys
+ * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+ */
+
+#include <linux/linkage.h>
+
+#ifdef CONFIG_ARC_HAS_LL64
+# define LOADX(DST,RX)         ldd.ab  DST, [RX, 8]
+# define STOREX(SRC,RX)                std.ab  SRC, [RX, 8]
+# define ZOLSHFT               5
+# define ZOLAND                        0x1F
+#else
+# define LOADX(DST,RX)         ld.ab   DST, [RX, 4]
+# define STOREX(SRC,RX)                st.ab   SRC, [RX, 4]
+# define ZOLSHFT               4
+# define ZOLAND                        0xF
+#endif
+
+ENTRY_CFI(memcpy)
+       mov     r3, r0          ; don't clobber ret val
+
+       lsr.f   lp_count, r2, ZOLSHFT
+       lpnz    @.Lcopy32_64bytes
+       ;; LOOP START
+       LOADX   (r6, r1)
+       LOADX   (r8, r1)
+       LOADX   (r10, r1)
+       LOADX   (r4, r1)
+       STOREX  (r6, r3)
+       STOREX  (r8, r3)
+       STOREX  (r10, r3)
+       STOREX  (r4, r3)
+.Lcopy32_64bytes:
+
+       and.f   lp_count, r2, ZOLAND ;Last remaining 31 bytes
+       lpnz    @.Lcopyremainingbytes
+       ;; LOOP START
+       ldb.ab  r5, [r1, 1]
+       stb.ab  r5, [r3, 1]
+.Lcopyremainingbytes:
+
+       j       [blink]
+END_CFI(memcpy)
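The new routine copies in chunk-sized blocks using ARC zero-overhead loops (lpnz), then finishes the tail byte by byte; ZOLSHFT/ZOLAND select 32-byte chunks when 64-bit ldd/std are available (CONFIG_ARC_HAS_LL64) and 16-byte chunks otherwise. A rough C rendering of the same loop structure, assuming the LL64 case; illustrative only, not a drop-in replacement:

#include <stddef.h>
#include <string.h>

void *toy_memcpy(void *dst, const void *src, size_t n)
{
	unsigned char *d = dst;
	const unsigned char *s = src;
	size_t chunks = n >> 5;		/* ZOLSHFT == 5: 32 bytes per iteration */
	size_t tail = n & 0x1f;		/* ZOLAND == 0x1F: 0..31 leftover bytes */

	while (chunks--) {		/* lpnz @.Lcopy32_64bytes */
		memcpy(d, s, 32);	/* stands in for the 4 LOADX/STOREX pairs */
		d += 32;
		s += 32;
	}
	while (tail--)			/* lpnz @.Lcopyremainingbytes: ldb.ab/stb.ab */
		*d++ = *s++;
	return dst;
}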
index f230bb7092fdb3d7d98883ab7310db1b4bc56654..b3373f5c88e0bf9267af0cb9dbc7f5b0f6cf6be8 100644 (file)
 
 #else
 
-.macro PREALLOC_INSTR
+.macro PREALLOC_INSTR  reg, off
 .endm
 
-.macro PREFETCHW_INSTR
+.macro PREFETCHW_INSTR reg, off
 .endm
 
 #endif
index 4135abec3fb09cd714c4c48d056a010b37f58c48..63e6e65046992f1388a3ae44aae5150abb1ee3f7 100644 (file)
@@ -113,10 +113,24 @@ static void read_decode_cache_bcr_arcv2(int cpu)
        }
 
        READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
-       if (cbcr.c)
+       if (cbcr.c) {
                ioc_exists = 1;
-       else
+
+               /*
+                * As for today we don't support both IOC and ZONE_HIGHMEM enabled
+                * simultaneously. This happens because as of today IOC aperture covers
+                * only ZONE_NORMAL (low mem) and any dma transactions outside this
+                * region won't be HW coherent.
+                * If we want to use both IOC and ZONE_HIGHMEM we can use
+                * bounce_buffer to handle dma transactions to HIGHMEM.
+                * Also it is possible to modify dma_direct cache ops or increase IOC
+                * aperture size if we are planning to use HIGHMEM without PAE.
+                */
+               if (IS_ENABLED(CONFIG_HIGHMEM) || is_pae40_enabled())
+                       ioc_enable = 0;
+       } else {
                ioc_enable = 0;
+       }
 
        /* HS 2.0 didn't have AUX_VOL */
        if (cpuinfo_arc700[cpu].core.family > 0x51) {
@@ -1158,19 +1172,6 @@ noinline void __init arc_ioc_setup(void)
        if (!ioc_enable)
                return;
 
-       /*
-        * As for today we don't support both IOC and ZONE_HIGHMEM enabled
-        * simultaneously. This happens because as of today IOC aperture covers
-        * only ZONE_NORMAL (low mem) and any dma transactions outside this
-        * region won't be HW coherent.
-        * If we want to use both IOC and ZONE_HIGHMEM we can use
-        * bounce_buffer to handle dma transactions to HIGHMEM.
-        * Also it is possible to modify dma_direct cache ops or increase IOC
-        * aperture size if we are planning to use HIGHMEM without PAE.
-        */
-       if (IS_ENABLED(CONFIG_HIGHMEM))
-               panic("IOC and HIGHMEM can't be used simultaneously");
-
        /* Flush + invalidate + disable L1 dcache */
        __dc_disable();
 
index 8eff057efcaebeae04b1fb801c003418090551eb..2eaecfb063a7336f2a78c54e31ab09f2576c63f6 100644 (file)
@@ -26,8 +26,8 @@ config EZNPS_MTM_EXT
        help
          Here we add new hierarchy for CPUs topology.
          We got:
-               Core
-               Thread
+           Core
+           Thread
           At the new thread level each CPU represents one HW thread.
           At the highest hierarchy each core contains 16 threads,
           each of which looks like a CPU from Linux's point of view.
@@ -35,10 +35,10 @@ config EZNPS_MTM_EXT
          core and HW scheduler round robin between them.
 
 config EZNPS_MEM_ERROR_ALIGN
-       bool "ARC-EZchip Memory error as an exception"
-       depends on EZNPS_MTM_EXT
-       default n
-       help
+       bool "ARC-EZchip Memory error as an exception"
+       depends on EZNPS_MTM_EXT
+       default n
+       help
           On the real chip of the NPS, user memory errors are handled
           as a machine check exception, which is fatal, whereas on
           the simulator platform for NPS they are handled as a Level 2 interrupt
index 054ead960f983a99a9f241ce1427fe0e1cd6cb8a..dc9855c4a3b404cff6a4dd2ac81ba92d12bbe4fc 100644 (file)
@@ -73,7 +73,7 @@ config ARM
        select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
        select HAVE_EXIT_THREAD
        select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
-       select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL
+       select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
        select HAVE_FUNCTION_TRACER if !XIP_KERNEL
        select HAVE_GCC_PLUGINS
        select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
@@ -178,10 +178,6 @@ config TRACE_IRQFLAGS_SUPPORT
        bool
        default !CPU_V7M
 
-config RWSEM_XCHGADD_ALGORITHM
-       bool
-       default y
-
 config ARCH_HAS_ILOG2_U32
        bool
 
@@ -596,6 +592,7 @@ config ARCH_DAVINCI
        select HAVE_IDE
        select PM_GENERIC_DOMAINS if PM
        select PM_GENERIC_DOMAINS_OF if PM && OF
+       select REGMAP_MMIO
        select RESET_CONTROLLER
        select SPARSE_IRQ
        select USE_OF
index 6d6e0330930b52f7369a46536473fa7174fad2d9..e388af4594a6e5e42a860469e10a53b89522e7bf 100644 (file)
@@ -47,8 +47,8 @@ config DEBUG_WX
 
 choice
        prompt "Choose kernel unwinder"
-       default UNWINDER_ARM if AEABI && !FUNCTION_GRAPH_TRACER
-       default UNWINDER_FRAME_POINTER if !AEABI || FUNCTION_GRAPH_TRACER
+       default UNWINDER_ARM if AEABI
+       default UNWINDER_FRAME_POINTER if !AEABI
        help
          This determines which method will be used for unwinding kernel stack
          traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
@@ -65,7 +65,7 @@ config UNWINDER_FRAME_POINTER
 
 config UNWINDER_ARM
        bool "ARM EABI stack unwinder"
-       depends on AEABI
+       depends on AEABI && !FUNCTION_GRAPH_TRACER
        select ARM_UNWIND
        help
          This option enables stack unwinding support in the kernel
index 6c7ccb428c079c3e43ef9cce2c344ec4b6809369..7135820f76d4f8b8d24374738332c0c4c0644bf7 100644 (file)
@@ -1438,7 +1438,21 @@ ENTRY(efi_stub_entry)
 
                @ Preserve return value of efi_entry() in r4
                mov     r4, r0
-               bl      cache_clean_flush
+
+               @ our cache maintenance code relies on CP15 barrier instructions
+               @ but since we arrived here with the MMU and caches configured
+               @ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
+               @ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
+               @ the enable path will be executed on v7+ only.
+               mrc     p15, 0, r1, c1, c0, 0   @ read SCTLR
+               tst     r1, #(1 << 5)           @ CP15BEN bit set?
+               bne     0f
+               orr     r1, r1, #(1 << 5)       @ CP15 barrier instructions
+               mcr     p15, 0, r1, c1, c0, 0   @ write SCTLR
+ ARM(          .inst   0xf57ff06f              @ v7+ isb       )
+ THUMB(                isb                                             )
+
+0:             bl      cache_clean_flush
                bl      cache_off
 
                @ Set parameters for booting zImage according to boot protocol
index dce5be5df97bd91abe3ff039e8befab58656124b..edcff79879e780e5aa307dfc0d18f393663a7f78 100644 (file)
                enable-active-high;
        };
 
+       /* TPS79501 */
+       v1_8d_reg: fixedregulator-v1_8d {
+               compatible = "regulator-fixed";
+               regulator-name = "v1_8d";
+               vin-supply = <&vbat>;
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+       };
+
+       /* TPS79501 */
+       v3_3d_reg: fixedregulator-v3_3d {
+               compatible = "regulator-fixed";
+               regulator-name = "v3_3d";
+               vin-supply = <&vbat>;
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+       };
+
        matrix_keypad: matrix_keypad0 {
                compatible = "gpio-matrix-keypad";
                debounce-delay-ms = <5>;
                status = "okay";
 
                /* Regulators */
-               AVDD-supply = <&vaux2_reg>;
-               IOVDD-supply = <&vaux2_reg>;
-               DRVDD-supply = <&vaux2_reg>;
-               DVDD-supply = <&vbat>;
+               AVDD-supply = <&v3_3d_reg>;
+               IOVDD-supply = <&v3_3d_reg>;
+               DRVDD-supply = <&v3_3d_reg>;
+               DVDD-supply = <&v1_8d_reg>;
        };
 };
 
index b128998097ce7180cb2a72291bb83ea2a19d0f52..2c2d8b5b8cf52bf55b28b20a47488363c895681c 100644 (file)
                enable-active-high;
        };
 
+       /* TPS79518 */
+       v1_8d_reg: fixedregulator-v1_8d {
+               compatible = "regulator-fixed";
+               regulator-name = "v1_8d";
+               vin-supply = <&vbat>;
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+       };
+
+       /* TPS78633 */
+       v3_3d_reg: fixedregulator-v3_3d {
+               compatible = "regulator-fixed";
+               regulator-name = "v3_3d";
+               vin-supply = <&vbat>;
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+       };
+
        leds {
                pinctrl-names = "default";
                pinctrl-0 = <&user_leds_s0>;
                status = "okay";
 
                /* Regulators */
-               AVDD-supply = <&vaux2_reg>;
-               IOVDD-supply = <&vaux2_reg>;
-               DRVDD-supply = <&vaux2_reg>;
-               DVDD-supply = <&vbat>;
+               AVDD-supply = <&v3_3d_reg>;
+               IOVDD-supply = <&v3_3d_reg>;
+               DRVDD-supply = <&v3_3d_reg>;
+               DVDD-supply = <&v1_8d_reg>;
        };
 };
 
index f459ec316a22d4cd723d43dd97d3709106aacedd..ca6d9f02a800c8a0e042d43280fa762ebce6fef4 100644 (file)
                        reg = <0xcc000 0x4>;
                        reg-names = "rev";
                        /* Domains (P, C): per_pwrdm, l4ls_clkdm */
-                       clocks = <&l4ls_clkctrl AM3_D_CAN0_CLKCTRL 0>;
+                       clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>;
                        clock-names = "fck";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        reg = <0xd0000 0x4>;
                        reg-names = "rev";
                        /* Domains (P, C): per_pwrdm, l4ls_clkdm */
-                       clocks = <&l4ls_clkctrl AM3_D_CAN1_CLKCTRL 0>;
+                       clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>;
                        clock-names = "fck";
                        #address-cells = <1>;
                        #size-cells = <1>;
index 5641d162dfdb0c106eed6f7f4dc4f7c120930970..28e7513ce61713a084bc5f91f96cc2426d3f50a8 100644 (file)
@@ -93,7 +93,7 @@
 };
 
 &hdmi {
-       hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
+       hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
 };
 
 &pwm {
index b715ab0fa1ffc09c24e101b4f506b9f9bb900550..e8d800fec63790925701a460afa8415c9706d8dc 100644 (file)
                        reg = <2>;
                };
 
-               switch@0 {
+               switch@10 {
                        compatible = "qca,qca8334";
-                       reg = <0>;
+                       reg = <10>;
 
                        switch_ports: ports {
                                #address-cells = <1>;
                                ethphy0: port@0 {
                                        reg = <0>;
                                        label = "cpu";
-                                       phy-mode = "rgmii";
+                                       phy-mode = "rgmii-id";
                                        ethernet = <&fec>;
 
                                        fixed-link {
index 1d1b4bd0670ffd094d2939ed9c91095d8ae8ba39..a4217f564a5347a568830e2032dd3fac2ae1c80f 100644 (file)
        pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
        vmcc-supply = <&reg_sd3_vmmc>;
        cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
-       bus-witdh = <4>;
+       bus-width = <4>;
        no-1-8-v;
        status = "okay";
 };
        pinctrl-1 = <&pinctrl_usdhc4_100mhz>;
        pinctrl-2 = <&pinctrl_usdhc4_200mhz>;
        vmcc-supply = <&reg_sd4_vmmc>;
-       bus-witdh = <8>;
+       bus-width = <8>;
        no-1-8-v;
        non-removable;
        status = "okay";
index 433bf09a1954c5ff05e1f3b3255c326fb69bf615..027df06c5dc7d60c9711ebef8b9333e2fe0c9a58 100644 (file)
@@ -91,6 +91,7 @@
        pinctrl-0 = <&pinctrl_enet>;
        phy-handle = <&ethphy>;
        phy-mode = "rgmii";
+       phy-reset-duration = <10>; /* in msecs */
        phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
        phy-supply = <&vdd_eth_io_reg>;
        status = "disabled";
index f6fb6783c1933154049768297372832f68586a04..54cfe72295aa47a278ee8d5ffae5c688b6d8b4fa 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) 2016 Freescale Semiconductor, Inc.
  * Copyright (C) 2017 NXP
index aa107ee41b8b8f3fbc13b92676224561fe0f92c0..ef653c3209bcc995aaa5d5cd79d0b3cf3fd0f8a0 100644 (file)
                        };
 
                        vccio_sd: LDO_REG5 {
+                               regulator-boot-on;
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <3300000>;
                                regulator-name = "vccio_sd";
        bus-width = <4>;
        cap-mmc-highspeed;
        cap-sd-highspeed;
-       card-detect-delay = <200>;
+       broken-cd;
        disable-wp;                     /* wp not hooked up */
        pinctrl-names = "default";
        pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
index 0bc2409f6903ffec1512942e9e657d9d983a2c35..192dbc089ade1730b9dce6bca8d356f3b0c83a00 100644 (file)
@@ -25,8 +25,6 @@
 
        gpio_keys: gpio-keys {
                compatible = "gpio-keys";
-               #address-cells = <1>;
-               #size-cells = <0>;
 
                pinctrl-names = "default";
                pinctrl-0 = <&pwr_key_l>;
index ca7d52daa8fb638641e3b90f633bcf1c1a1e5497..a024d1e7e74cd94eade3e5c00ed9d56353ef93aa 100644 (file)
@@ -70,7 +70,7 @@
                        compatible = "arm,cortex-a12";
                        reg = <0x501>;
                        resets = <&cru SRST_CORE1>;
-                       operating-points = <&cpu_opp_table>;
+                       operating-points-v2 = <&cpu_opp_table>;
                        #cooling-cells = <2>; /* min followed by max */
                        clock-latency = <40000>;
                        clocks = <&cru ARMCLK>;
@@ -80,7 +80,7 @@
                        compatible = "arm,cortex-a12";
                        reg = <0x502>;
                        resets = <&cru SRST_CORE2>;
-                       operating-points = <&cpu_opp_table>;
+                       operating-points-v2 = <&cpu_opp_table>;
                        #cooling-cells = <2>; /* min followed by max */
                        clock-latency = <40000>;
                        clocks = <&cru ARMCLK>;
@@ -90,7 +90,7 @@
                        compatible = "arm,cortex-a12";
                        reg = <0x503>;
                        resets = <&cru SRST_CORE3>;
-                       operating-points = <&cpu_opp_table>;
+                       operating-points-v2 = <&cpu_opp_table>;
                        #cooling-cells = <2>; /* min followed by max */
                        clock-latency = <40000>;
                        clocks = <&cru ARMCLK>;
                clock-names = "ref", "pclk";
                power-domains = <&power RK3288_PD_VIO>;
                rockchip,grf = <&grf>;
-               #address-cells = <1>;
-               #size-cells = <0>;
                status = "disabled";
 
                ports {
        gpu_opp_table: gpu-opp-table {
                compatible = "operating-points-v2";
 
-               opp@100000000 {
+               opp-100000000 {
                        opp-hz = /bits/ 64 <100000000>;
                        opp-microvolt = <950000>;
                };
-               opp@200000000 {
+               opp-200000000 {
                        opp-hz = /bits/ 64 <200000000>;
                        opp-microvolt = <950000>;
                };
-               opp@300000000 {
+               opp-300000000 {
                        opp-hz = /bits/ 64 <300000000>;
                        opp-microvolt = <1000000>;
                };
-               opp@400000000 {
+               opp-400000000 {
                        opp-hz = /bits/ 64 <400000000>;
                        opp-microvolt = <1100000>;
                };
-               opp@500000000 {
+               opp-500000000 {
                        opp-hz = /bits/ 64 <500000000>;
                        opp-microvolt = <1200000>;
                };
-               opp@600000000 {
+               opp-600000000 {
                        opp-hz = /bits/ 64 <600000000>;
                        opp-microvolt = <1250000>;
                };
index 1c01a6f843d8a43c07ab25dd18ffb57acd044c0b..28a2e45752fea34eb2efb576439409c0611d9d90 100644 (file)
 #define PIN_PC9__GPIO                  PINMUX_PIN(PIN_PC9, 0, 0)
 #define PIN_PC9__FIQ                   PINMUX_PIN(PIN_PC9, 1, 3)
 #define PIN_PC9__GTSUCOMP              PINMUX_PIN(PIN_PC9, 2, 1)
-#define PIN_PC9__ISC_D0                        PINMUX_PIN(PIN_PC9, 2, 1)
+#define PIN_PC9__ISC_D0                        PINMUX_PIN(PIN_PC9, 3, 1)
 #define PIN_PC9__TIOA4                 PINMUX_PIN(PIN_PC9, 4, 2)
 #define PIN_PC10                       74
 #define PIN_PC10__GPIO                 PINMUX_PIN(PIN_PC10, 0, 0)
index 8661dd9b064a5cdfd4a8801a8b98e9c9f45d7dc0..b37f8e675e4081b200bfd9b9a97d565efce1d1f7 100644 (file)
@@ -170,6 +170,9 @@ CONFIG_IMX_SDMA=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_IIO=y
 CONFIG_FSL_MX25_ADC=y
+CONFIG_PWM=y
+CONFIG_PWM_IMX1=y
+CONFIG_PWM_IMX27=y
 CONFIG_EXT4_FS=y
 # CONFIG_DNOTIFY is not set
 CONFIG_VFAT_FS=y
index 5586a5074a96b6a84165e32f59ea2fa0800b484a..50fb01d70b1030ca6d2f721b30eaa8078894b589 100644 (file)
@@ -398,7 +398,7 @@ CONFIG_MAG3110=y
 CONFIG_MPL3115=y
 CONFIG_PWM=y
 CONFIG_PWM_FSL_FTM=y
-CONFIG_PWM_IMX=y
+CONFIG_PWM_IMX27=y
 CONFIG_NVMEM_IMX_OCOTP=y
 CONFIG_NVMEM_VF610_OCOTP=y
 CONFIG_TEE=y
index a8a4eb7f6dae0371940a9cc70c077e2194fb02bc..8fb51b7bf1d587499f2f782d1f699c47adc91b7d 100644 (file)
@@ -12,7 +12,6 @@ generic-y += mm-arch-hooks.h
 generic-y += msi.h
 generic-y += parport.h
 generic-y += preempt.h
-generic-y += rwsem.h
 generic-y += seccomp.h
 generic-y += segment.h
 generic-y += serial.h
index 2de96a180166eb920833b1100159716735f5e206..31de4ab930050cb6e7584fd45747b5e329fc6bfd 100644 (file)
@@ -381,6 +381,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
        return ret;
 }
 
+static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
+                                      const void *data, unsigned long len)
+{
+       int srcu_idx = srcu_read_lock(&kvm->srcu);
+       int ret = kvm_write_guest(kvm, gpa, data, len);
+
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+       return ret;
+}
+
 static inline void *kvm_get_hyp_vector(void)
 {
        switch(read_cpuid_part()) {
index de2089501b8b5705a29bcb80b7007d630cfabc60..9e11dce55e06f4e7359b7b779cc7814ae752c813 100644 (file)
@@ -75,6 +75,8 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm)
 
 #define S2_PMD_MASK                            PMD_MASK
 #define S2_PMD_SIZE                            PMD_SIZE
+#define S2_PUD_MASK                            PUD_MASK
+#define S2_PUD_SIZE                            PUD_SIZE
 
 static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
 {
index 06dea6bce293b934e1146d26aa316ea8e36e80b5..080ce70cab12a6944af4120ed5a3b9ca9889411d 100644 (file)
@@ -55,53 +55,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       if (n == 0)
-               return;
-
-       if (i + n > SYSCALL_MAX_ARGS) {
-               unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
-               unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
-               pr_warn("%s called with max args %d, handling only %d\n",
-                       __func__, i + n, SYSCALL_MAX_ARGS);
-               memset(args_bad, 0, n_bad * sizeof(args[0]));
-               n = SYSCALL_MAX_ARGS - i;
-       }
-
-       if (i == 0) {
-               args[0] = regs->ARM_ORIG_r0;
-               args++;
-               i++;
-               n--;
-       }
-
-       memcpy(args, &regs->ARM_r0 + i, n * sizeof(args[0]));
+       args[0] = regs->ARM_ORIG_r0;
+       args++;
+
+       memcpy(args, &regs->ARM_r0 + 1, 5 * sizeof(args[0]));
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       if (n == 0)
-               return;
-
-       if (i + n > SYSCALL_MAX_ARGS) {
-               pr_warn("%s called with max args %d, handling only %d\n",
-                       __func__, i + n, SYSCALL_MAX_ARGS);
-               n = SYSCALL_MAX_ARGS - i;
-       }
-
-       if (i == 0) {
-               regs->ARM_ORIG_r0 = args[0];
-               args++;
-               i++;
-               n--;
-       }
-
-       memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
+       regs->ARM_ORIG_r0 = args[0];
+       args++;
+
+       memcpy(&regs->ARM_r0 + 1, args, 5 * sizeof(args[0]));
 }
 
 static inline int syscall_get_arch(void)
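
Both rewrites above rely on the tracing core now always passing a fixed six-slot argument array, which is why the old i/n windowing, bounds clamping and pr_warn() fallbacks can simply go. Argument 0 is special on ARM: it comes from (or is written to) ARM_ORIG_r0, since r0 itself is clobbered by the syscall return value; arguments 1-5 move as one block. A minimal sketch of the calling convention this assumes (illustrative, not from the patch):

        unsigned long args[6];  /* callers must always provide six slots */

        syscall_get_arguments(task, regs, args);
        /* args[0] == regs->ARM_ORIG_r0, args[1..5] == regs->ARM_r1..r5 */
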
index f854148c8d7c258927b031d0c87e8aa8a142e309..bc6d04a098998b5079a4e5e652d0ce540b0fafba 100644 (file)
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 
-#define MMU_GATHER_BUNDLE      8
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
 static inline void __tlb_remove_table(void *_table)
 {
        free_page_and_swap_cache((struct page *)_table);
 }
 
-struct mmu_table_batch {
-       struct rcu_head         rcu;
-       unsigned int            nr;
-       void                    *tables[0];
-};
-
-#define MAX_TABLE_BATCH                \
-       ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
-
-extern void tlb_table_flush(struct mmu_gather *tlb);
-extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-
-#define tlb_remove_entry(tlb, entry)   tlb_remove_table(tlb, entry)
-#else
-#define tlb_remove_entry(tlb, entry)   tlb_remove_page(tlb, entry)
-#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
-
-/*
- * TLB handling.  This allows us to remove pages from the page
- * tables, and efficiently handle the TLB issues.
- */
-struct mmu_gather {
-       struct mm_struct        *mm;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-       struct mmu_table_batch  *batch;
-       unsigned int            need_flush;
-#endif
-       unsigned int            fullmm;
-       struct vm_area_struct   *vma;
-       unsigned long           start, end;
-       unsigned long           range_start;
-       unsigned long           range_end;
-       unsigned int            nr;
-       unsigned int            max;
-       struct page             **pages;
-       struct page             *local[MMU_GATHER_BUNDLE];
-};
-
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-/*
- * This is unnecessarily complex.  There's three ways the TLB shootdown
- * code is used:
- *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
- *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
- *     tlb->vma will be non-NULL.
- *  2. Unmapping all vmas.  See exit_mmap().
- *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
- *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
- *  3. Unmapping argument pages.  See shift_arg_pages().
- *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
- *     tlb->vma will be NULL.
- */
-static inline void tlb_flush(struct mmu_gather *tlb)
-{
-       if (tlb->fullmm || !tlb->vma)
-               flush_tlb_mm(tlb->mm);
-       else if (tlb->range_end > 0) {
-               flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
-               tlb->range_start = TASK_SIZE;
-               tlb->range_end = 0;
-       }
-}
-
-static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
-{
-       if (!tlb->fullmm) {
-               if (addr < tlb->range_start)
-                       tlb->range_start = addr;
-               if (addr + PAGE_SIZE > tlb->range_end)
-                       tlb->range_end = addr + PAGE_SIZE;
-       }
-}
-
-static inline void __tlb_alloc_page(struct mmu_gather *tlb)
-{
-       unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
-
-       if (addr) {
-               tlb->pages = (void *)addr;
-               tlb->max = PAGE_SIZE / sizeof(struct page *);
-       }
-}
-
-static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
-       tlb_flush(tlb);
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-       tlb_table_flush(tlb);
-#endif
-}
-
-static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
-       free_pages_and_swap_cache(tlb->pages, tlb->nr);
-       tlb->nr = 0;
-       if (tlb->pages == tlb->local)
-               __tlb_alloc_page(tlb);
-}
-
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
-       tlb_flush_mmu_tlbonly(tlb);
-       tlb_flush_mmu_free(tlb);
-}
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-                       unsigned long start, unsigned long end)
-{
-       tlb->mm = mm;
-       tlb->fullmm = !(start | (end+1));
-       tlb->start = start;
-       tlb->end = end;
-       tlb->vma = NULL;
-       tlb->max = ARRAY_SIZE(tlb->local);
-       tlb->pages = tlb->local;
-       tlb->nr = 0;
-       __tlb_alloc_page(tlb);
+#include <asm-generic/tlb.h>
 
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-       tlb->batch = NULL;
+#ifndef CONFIG_HAVE_RCU_TABLE_FREE
+#define tlb_remove_table(tlb, entry) tlb_remove_page(tlb, entry)
 #endif
-}
-
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
-                       unsigned long start, unsigned long end, bool force)
-{
-       if (force) {
-               tlb->range_start = start;
-               tlb->range_end = end;
-       }
-
-       tlb_flush_mmu(tlb);
-
-       /* keep the page table cache within bounds */
-       check_pgt_cache();
-
-       if (tlb->pages != tlb->local)
-               free_pages((unsigned long)tlb->pages, 0);
-}
-
-/*
- * Memorize the range for the TLB flush.
- */
-static inline void
-tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
-{
-       tlb_add_flush(tlb, addr);
-}
-
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)       \
-       tlb_remove_tlb_entry(tlb, ptep, address)
-/*
- * In the case of tlb vma handling, we can optimise these away in the
- * case where we're doing a full MM flush.  When we're doing a munmap,
- * the vmas are adjusted to only cover the region to be torn down.
- */
-static inline void
-tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
-       if (!tlb->fullmm) {
-               flush_cache_range(vma, vma->vm_start, vma->vm_end);
-               tlb->vma = vma;
-               tlb->range_start = TASK_SIZE;
-               tlb->range_end = 0;
-       }
-}
 
 static inline void
-tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
-       if (!tlb->fullmm)
-               tlb_flush(tlb);
-}
-
-static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-       tlb->pages[tlb->nr++] = page;
-       VM_WARN_ON(tlb->nr > tlb->max);
-       if (tlb->nr == tlb->max)
-               return true;
-       return false;
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-       if (__tlb_remove_page(tlb, page))
-               tlb_flush_mmu(tlb);
-}
-
-static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
-                                         struct page *page, int page_size)
-{
-       return __tlb_remove_page(tlb, page);
-}
-
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
-                                       struct page *page, int page_size)
-{
-       return tlb_remove_page(tlb, page);
-}
-
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
-       unsigned long addr)
+__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
 {
        pgtable_page_dtor(pte);
 
-#ifdef CONFIG_ARM_LPAE
-       tlb_add_flush(tlb, addr);
-#else
+#ifndef CONFIG_ARM_LPAE
        /*
         * With the classic ARM MMU, a pte page has two corresponding pmd
         * entries, each covering 1MB.
         */
-       addr &= PMD_MASK;
-       tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
-       tlb_add_flush(tlb, addr + SZ_1M);
+       addr = (addr & PMD_MASK) + SZ_1M;
+       __tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
 #endif
 
-       tlb_remove_entry(tlb, pte);
-}
-
-static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
-                                 unsigned long addr)
-{
-#ifdef CONFIG_ARM_LPAE
-       tlb_add_flush(tlb, addr);
-       tlb_remove_entry(tlb, virt_to_page(pmdp));
-#endif
+       tlb_remove_table(tlb, pte);
 }
 
 static inline void
-tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
-{
-       tlb_add_flush(tlb, addr);
-}
-
-#define pte_free_tlb(tlb, ptep, addr)  __pte_free_tlb(tlb, ptep, addr)
-#define pmd_free_tlb(tlb, pmdp, addr)  __pmd_free_tlb(tlb, pmdp, addr)
-#define pud_free_tlb(tlb, pudp, addr)  pud_free((tlb)->mm, pudp)
-
-#define tlb_migrate_finish(mm)         do { } while (0)
-
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
-                                                    unsigned int page_size)
+__pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
 {
-}
-
-static inline void tlb_flush_remove_tables(struct mm_struct *mm)
-{
-}
+#ifdef CONFIG_ARM_LPAE
+       struct page *page = virt_to_page(pmdp);
 
-static inline void tlb_flush_remove_tables_local(void *arg)
-{
+       tlb_remove_table(tlb, page);
+#endif
 }
 
 #endif /* CONFIG_MMU */
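
This rewrite drops ARM's private mmu_gather machinery (page batching, range tracking, vma handling) in favour of the generic asm-generic/tlb.h implementation, keeping only __tlb_remove_table() and the page-table free hooks. Note how the classic-MMU case in __pte_free_tlb() now covers its two shadow pmd pages with a single __tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE) call where two tlb_add_flush() calls used to be. For reference, the generic range helper amounts to the following (a paraphrase of the asm-generic logic, not ARM code):

        static inline void __tlb_adjust_range(struct mmu_gather *tlb,
                                              unsigned long address,
                                              unsigned int range_size)
        {
                /* grow the pending flush window to cover the new range */
                tlb->start = min(tlb->start, address);
                tlb->end   = max(tlb->end, address + range_size);
        }
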
index 23b4464c0995ab3ed1673f7aa36e16a3554e296a..ce8573157774dc078e49a5a2d5ccecc3ad54986d 100644 (file)
@@ -3,3 +3,4 @@
 generated-y += unistd-common.h
 generated-y += unistd-oabi.h
 generated-y += unistd-eabi.h
+generic-y += kvm_para.h
diff --git a/arch/arm/include/uapi/asm/kvm_para.h b/arch/arm/include/uapi/asm/kvm_para.h
deleted file mode 100644 (file)
index baacc49..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/kvm_para.h>
index c08d2d890f7b918981c472c155c6df368a1b30b3..b38bbd011b358f433e3c9201fdb2015611286252 100644 (file)
@@ -133,9 +133,9 @@ __secondary_data:
  */
        .text
 __after_proc_init:
-#ifdef CONFIG_ARM_MPU
 M_CLASS(movw   r12, #:lower16:BASEADDR_V7M_SCB)
 M_CLASS(movt   r12, #:upper16:BASEADDR_V7M_SCB)
+#ifdef CONFIG_ARM_MPU
 M_CLASS(ldr    r3, [r12, 0x50])
 AR_CLASS(mrc   p15, 0, r3, c0, c1, 4)          @ Read ID_MMFR0
        and     r3, r3, #(MMFR0_PMSA)           @ PMSA field
index 76bb8de6bf6b6983bf5a231ae66b8a162bd9e3ba..be5edfdde558d600494c6720850a7ed0c0e0b153 100644 (file)
@@ -549,8 +549,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
        int ret;
 
        /*
-        * Increment event counter and perform fixup for the pre-signal
-        * frame.
+        * Perform fixup for the pre-signal frame.
         */
        rseq_signal_deliver(ksig, regs);
 
index a56e7c856ab5648995888ae5f47c5d1ab23b08bf..86870f40f9a07558877d1d7dba098ad5926c4bbf 100644 (file)
@@ -115,8 +115,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
                 * running on another CPU?  For now, ignore it as we
                 * can't guarantee we won't explode.
                 */
-               if (trace->nr_entries < trace->max_entries)
-                       trace->entries[trace->nr_entries++] = ULONG_MAX;
                return;
 #else
                frame.fp = thread_saved_fp(tsk);
@@ -134,8 +132,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
        }
 
        walk_stackframe(&frame, save_trace, &data);
-       if (trace->nr_entries < trace->max_entries)
-               trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 
 void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
@@ -153,8 +149,6 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
        frame.pc = regs->ARM_pc;
 
        walk_stackframe(&frame, save_trace, &data);
-       if (trace->nr_entries < trace->max_entries)
-               trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
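
These deletions (and the matching arm64 stacktrace hunks later in this diff) retire the old convention of appending a ULONG_MAX sentinel to mark the end of a trace; trace->nr_entries alone is authoritative, so arch code stops writing the marker and consumers stop scanning for it. The shift in consumer idiom, sketched (print_ip_sym() used purely for illustration):

        /* old: stop at nr_entries or at the ULONG_MAX end marker */
        for (i = 0; i < trace->nr_entries && trace->entries[i] != ULONG_MAX; i++)
                print_ip_sym(trace->entries[i]);

        /* new: nr_entries is the only length that matters */
        for (i = 0; i < trace->nr_entries; i++)
                print_ip_sym(trace->entries[i]);
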
index 51e808adb00cc23576b1f9148f0388542627d397..2a757dcaa1a5e9d63b5ae47833ef31d12ab94aa2 100644 (file)
@@ -591,13 +591,13 @@ static int __init at91_pm_backup_init(void)
 
        np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
        if (!np)
-               goto securam_fail;
+               goto securam_fail_no_ref_dev;
 
        pdev = of_find_device_by_node(np);
        of_node_put(np);
        if (!pdev) {
                pr_warn("%s: failed to find securam device!\n", __func__);
-               goto securam_fail;
+               goto securam_fail_no_ref_dev;
        }
 
        sram_pool = gen_pool_get(&pdev->dev, NULL);
@@ -620,6 +620,8 @@ static int __init at91_pm_backup_init(void)
        return 0;
 
 securam_fail:
+       put_device(&pdev->dev);
+securam_fail_no_ref_dev:
        iounmap(pm_data.sfrbu);
        pm_data.sfrbu = NULL;
        return ret;
index 7d5a44a06648de2fd8e5e15beef19762d6925e81..f676592d840210558a5daf54e1f7c265be06a918 100644 (file)
@@ -90,7 +90,7 @@ void __init cns3xxx_map_io(void)
 /* used by entry-macro.S */
 void __init cns3xxx_init_irq(void)
 {
-       gic_init(0, 29, IOMEM(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT),
+       gic_init(IOMEM(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT),
                 IOMEM(CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT));
 }
 
index bfeb25aaf9a2a7a48857a3896fb682d7d94568a8..326e870d712394fad445033defd8e3ff5975ebdd 100644 (file)
 #include "cpuidle.h"
 #include "hardware.h"
 
-static atomic_t master = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(master_lock);
+static int num_idle_cpus = 0;
+static DEFINE_SPINLOCK(cpuidle_lock);
 
 static int imx6q_enter_wait(struct cpuidle_device *dev,
                            struct cpuidle_driver *drv, int index)
 {
-       if (atomic_inc_return(&master) == num_online_cpus()) {
-               /*
-                * With this lock, we prevent other cpu to exit and enter
-                * this function again and become the master.
-                */
-               if (!spin_trylock(&master_lock))
-                       goto idle;
+       spin_lock(&cpuidle_lock);
+       if (++num_idle_cpus == num_online_cpus())
                imx6_set_lpm(WAIT_UNCLOCKED);
-               cpu_do_idle();
-               imx6_set_lpm(WAIT_CLOCKED);
-               spin_unlock(&master_lock);
-               goto done;
-       }
+       spin_unlock(&cpuidle_lock);
 
-idle:
        cpu_do_idle();
-done:
-       atomic_dec(&master);
+
+       spin_lock(&cpuidle_lock);
+       if (num_idle_cpus-- == num_online_cpus())
+               imx6_set_lpm(WAIT_CLOCKED);
+       spin_unlock(&cpuidle_lock);
 
        return index;
 }
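
The rework above replaces the atomic "master" counter plus spin_trylock() dance with a plain spinlock around a count of idling CPUs: the last CPU to enter idle switches the SoC to WAIT_UNCLOCKED, and the first CPU to wake restores WAIT_CLOCKED under the same lock, so the deep state can no longer linger while a CPU is already running. The same "last man in, first man out" pattern in isolation (hypothetical helper names; the enter/leave calls stand in for imx6_set_lpm()):

        static int idle_cpus;
        static DEFINE_SPINLOCK(idle_lock);

        static void last_man_enter(void)
        {
                spin_lock(&idle_lock);
                if (++idle_cpus == num_online_cpus())
                        enter_deep_idle();      /* imx6_set_lpm(WAIT_UNCLOCKED) */
                spin_unlock(&idle_lock);
        }

        static void first_man_exit(void)
        {
                spin_lock(&idle_lock);
                if (idle_cpus-- == num_online_cpus())
                        leave_deep_idle();      /* imx6_set_lpm(WAIT_CLOCKED) */
                spin_unlock(&idle_lock);
        }
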
index c7169c2f94c4fd8cc018caa790c7b170e778eaf3..08c7892866c2df48732d15b9aa64329d0b009b75 100644 (file)
@@ -59,6 +59,7 @@ static void __init imx51_m4if_setup(void)
                return;
 
        m4if_base = of_iomap(np, 0);
+       of_node_put(np);
        if (!m4if_base) {
                pr_err("Unable to map M4IF registers\n");
                return;
index 53c316f7301e69fcbebbfe5d73bb48664180f5b6..fe4932fda01d7d0bc819c0ca4e6dcedb6b061081 100644 (file)
@@ -300,7 +300,7 @@ static struct resource iop13xx_adma_2_resources[] = {
        }
 };
 
-static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
+static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32);
 static struct iop_adma_platform_data iop13xx_adma_0_data = {
        .hw_id = 0,
        .pool_size = PAGE_SIZE,
@@ -324,7 +324,7 @@ static struct platform_device iop13xx_adma_0_channel = {
        .resource = iop13xx_adma_0_resources,
        .dev = {
                .dma_mask = &iop13xx_adma_dmamask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
                .platform_data = (void *) &iop13xx_adma_0_data,
        },
 };
@@ -336,7 +336,7 @@ static struct platform_device iop13xx_adma_1_channel = {
        .resource = iop13xx_adma_1_resources,
        .dev = {
                .dma_mask = &iop13xx_adma_dmamask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
                .platform_data = (void *) &iop13xx_adma_1_data,
        },
 };
@@ -348,7 +348,7 @@ static struct platform_device iop13xx_adma_2_channel = {
        .resource = iop13xx_adma_2_resources,
        .dev = {
                .dma_mask = &iop13xx_adma_dmamask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
                .platform_data = (void *) &iop13xx_adma_2_data,
        },
 };
index db511ec2b1df6824cb6d3d24659cfebe2428d5ec..116feb6b261eb7b0e08ee7ce248e44682e537898 100644 (file)
@@ -152,7 +152,7 @@ static struct resource iop13xx_tpmi_3_resources[] = {
        }
 };
 
-u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
+u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32);
 static struct platform_device iop13xx_tpmi_0_device = {
        .name = "iop-tpmi",
        .id = 0,
@@ -160,7 +160,7 @@ static struct platform_device iop13xx_tpmi_0_device = {
        .resource = iop13xx_tpmi_0_resources,
        .dev = {
                .dma_mask          = &iop13xx_tpmi_mask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
@@ -171,7 +171,7 @@ static struct platform_device iop13xx_tpmi_1_device = {
        .resource = iop13xx_tpmi_1_resources,
        .dev = {
                .dma_mask          = &iop13xx_tpmi_mask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
@@ -182,7 +182,7 @@ static struct platform_device iop13xx_tpmi_2_device = {
        .resource = iop13xx_tpmi_2_resources,
        .dev = {
                .dma_mask          = &iop13xx_tpmi_mask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
@@ -193,7 +193,7 @@ static struct platform_device iop13xx_tpmi_3_device = {
        .resource = iop13xx_tpmi_3_resources,
        .dev = {
                .dma_mask          = &iop13xx_tpmi_mask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
index 591543c81399b4f976b52599cf84b7cb22e512ea..3ea880f5fcb7338b8e419db9fd49c06cee263ca0 100644 (file)
@@ -65,6 +65,7 @@ static void m10v_smp_init(unsigned int max_cpus)
                writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 static void m10v_cpu_die(unsigned int l_cpu)
 {
        gic_cpu_if_down(0);
@@ -83,12 +84,15 @@ static int m10v_cpu_kill(unsigned int l_cpu)
 
        return 1;
 }
+#endif
 
 static struct smp_operations m10v_smp_ops __initdata = {
        .smp_prepare_cpus       = m10v_smp_init,
        .smp_boot_secondary     = m10v_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
        .cpu_die                = m10v_cpu_die,
        .cpu_kill               = m10v_cpu_kill,
+#endif
 };
 CPU_METHOD_OF_DECLARE(m10v_smp, "socionext,milbeaut-m10v-smp", &m10v_smp_ops);
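
The new #ifdef pairs are needed because the .cpu_die and .cpu_kill members of struct smp_operations only exist when CONFIG_HOTPLUG_CPU is enabled, so the unguarded initializers broke !HOTPLUG_CPU builds and left both functions defined but unused. The shape of the struct being initialized, abridged (paraphrased from arch/arm/include/asm/smp.h):

        struct smp_operations {
                void (*smp_prepare_cpus)(unsigned int max_cpus);
                int  (*smp_boot_secondary)(unsigned int cpu,
                                           struct task_struct *idle);
        #ifdef CONFIG_HOTPLUG_CPU
                int  (*cpu_kill)(unsigned int cpu);
                void (*cpu_die)(unsigned int cpu);
                /* ... */
        #endif
                /* ... */
        };
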
 
index be30c3c061b46ee0c1adf3ce55a872eb7bc9c9c0..1b15d593837ed78ea22298ccc4ae60cb3de166f1 100644 (file)
@@ -182,6 +182,7 @@ static struct resource latch1_resources[] = {
 
 static struct bgpio_pdata latch1_pdata = {
        .label  = LATCH1_LABEL,
+       .base   = -1,
        .ngpio  = LATCH1_NGPIO,
 };
 
@@ -219,6 +220,7 @@ static struct resource latch2_resources[] = {
 
 static struct bgpio_pdata latch2_pdata = {
        .label  = LATCH2_LABEL,
+       .base   = -1,
        .ngpio  = LATCH2_NGPIO,
 };
 
index 1444b4b4bd9f85e54368c0e18ac31f3f2fc033eb..439e143cad7b5d4d8ef48122816f9acf436570c3 100644 (file)
@@ -250,8 +250,10 @@ static int __init omapdss_init_of(void)
        if (!node)
                return 0;
 
-       if (!of_device_is_available(node))
+       if (!of_device_is_available(node)) {
+               of_node_put(node);
                return 0;
+       }
 
        pdev = of_find_device_by_node(node);
 
index a4d1f8de3b5b23453ee4738723164a5ba8405424..d9612221e4848971f4ea27cf4f5d4c319073e439 100644 (file)
@@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = {
        .resource = iop3xx_dma_0_resources,
        .dev = {
                .dma_mask = &iop3xx_adma_dmamask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
                .platform_data = (void *) &iop3xx_dma_0_data,
        },
 };
@@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = {
        .resource = iop3xx_dma_1_resources,
        .dev = {
                .dma_mask = &iop3xx_adma_dmamask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
                .platform_data = (void *) &iop3xx_dma_1_data,
        },
 };
@@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = {
        .resource = iop3xx_aau_resources,
        .dev = {
                .dma_mask = &iop3xx_adma_dmamask,
-               .coherent_dma_mask = DMA_BIT_MASK(64),
+               .coherent_dma_mask = DMA_BIT_MASK(32),
                .platform_data = (void *) &iop3xx_aau_data,
        },
 };
index a6c81ce00f520625880c29c083b3f70384c3db1f..8647cb80a93bd222234f4951f2249ac0399ca025 100644 (file)
@@ -622,7 +622,7 @@ static struct platform_device orion_xor0_shared = {
        .resource       = orion_xor0_shared_resources,
        .dev            = {
                .dma_mask               = &orion_xor_dmamask,
-               .coherent_dma_mask      = DMA_BIT_MASK(64),
+               .coherent_dma_mask      = DMA_BIT_MASK(32),
                .platform_data          = &orion_xor0_pdata,
        },
 };
@@ -683,7 +683,7 @@ static struct platform_device orion_xor1_shared = {
        .resource       = orion_xor1_shared_resources,
        .dev            = {
                .dma_mask               = &orion_xor_dmamask,
-               .coherent_dma_mask      = DMA_BIT_MASK(64),
+               .coherent_dma_mask      = DMA_BIT_MASK(32),
                .platform_data          = &orion_xor1_pdata,
        },
 };
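
This run of hunks (iop13xx ADMA and TPMI, IOP3xx DMA/AAU, Orion XOR) shrinks the advertised DMA capability from 64 to 32 bits: these are 32-bit platforms whose engines cannot address beyond 4 GiB, and a DMA-mapping core that validates masks against what the bus can honour has no use for the inflated DMA_BIT_MASK(64) values. The equivalent runtime idiom for a driver declaring the same limit (an illustrative sketch with a hypothetical probe function, not code from this patch):

        #include <linux/dma-mapping.h>
        #include <linux/platform_device.h>

        static int example_probe(struct platform_device *pdev)
        {
                /* declare that the device can only address 32 bits */
                int ret = dma_set_mask_and_coherent(&pdev->dev,
                                                    DMA_BIT_MASK(32));
                if (ret)
                        return ret;
                /* ... */
                return 0;
        }
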
index 9016f4081bb9cff33886860e9a1d48f0ee58e47c..0393917eaa57aaf8cda4548b9a34a705be6c73a0 100644 (file)
 421    common  rt_sigtimedwait_time64          sys_rt_sigtimedwait
 422    common  futex_time64                    sys_futex
 423    common  sched_rr_get_interval_time64    sys_sched_rr_get_interval
+424    common  pidfd_send_signal               sys_pidfd_send_signal
+425    common  io_uring_setup                  sys_io_uring_setup
+426    common  io_uring_enter                  sys_io_uring_enter
+427    common  io_uring_register               sys_io_uring_register
index 117b2541ef3d16dfe09ba67784e9faf3b43634e6..d81adca1b04dbaa102dc06dc2f253a93a3184063 100644 (file)
@@ -149,7 +149,6 @@ config ARM64
        select HAVE_PERF_USER_STACK_DUMP
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_RCU_TABLE_FREE
-       select HAVE_RCU_TABLE_INVALIDATE
        select HAVE_RSEQ
        select HAVE_STACKPROTECTOR
        select HAVE_SYSCALL_TRACEPOINTS
@@ -159,7 +158,6 @@ config ARM64
        select IRQ_DOMAIN
        select IRQ_FORCED_THREADING
        select MODULES_USE_ELF_RELA
-       select MULTI_IRQ_HANDLER
        select NEED_DMA_MAP_STATE
        select NEED_SG_DMA_LENGTH
        select OF
@@ -238,9 +236,6 @@ config LOCKDEP_SUPPORT
 config TRACE_IRQFLAGS_SUPPORT
        def_bool y
 
-config RWSEM_XCHGADD_ALGORITHM
-       def_bool y
-
 config GENERIC_BUG
        def_bool y
        depends on BUG
index 70498a033cf57408ccdefe374c5fa8e1d22e785d..b5ca9c50876d9a23947dde5d7fe553104c9c0805 100644 (file)
@@ -27,6 +27,7 @@ config ARCH_BCM2835
        bool "Broadcom BCM2835 family"
        select TIMER_OF
        select GPIOLIB
+       select MFD_CORE
        select PINCTRL
        select PINCTRL_BCM2835
        select ARM_AMBA
index 7c649f6b14cb6eb73ea8fb23ded74ed5152d3a70..cd7c76e58b09a60f75ccd510083bab730a2378b0 100644 (file)
                        rx-fifo-depth = <16384>;
                        snps,multicast-filter-bins = <256>;
                        iommus = <&smmu 1>;
+                       altr,sysmgr-syscon = <&sysmgr 0x44 0>;
                        status = "disabled";
                };
 
                        rx-fifo-depth = <16384>;
                        snps,multicast-filter-bins = <256>;
                        iommus = <&smmu 2>;
+                       altr,sysmgr-syscon = <&sysmgr 0x48 0>;
                        status = "disabled";
                };
 
                        rx-fifo-depth = <16384>;
                        snps,multicast-filter-bins = <256>;
                        iommus = <&smmu 3>;
+                       altr,sysmgr-syscon = <&sysmgr 0x4c 0>;
                        status = "disabled";
                };
 
index bb2045be8814036ddced1d4a7ec5b42951343832..97aeb946ed5e7473639ec94a498512d48a12ca8b 100644 (file)
                nvidia,default-trim = <0x9>;
                nvidia,dqs-trim = <63>;
                mmc-hs400-1_8v;
-               supports-cqe;
                status = "disabled";
        };
 
index 61a0afb74e6310b2b4c16bcf9939f6eab7db6258..1ea684af99c4a19b674f2ab90e38680584b09cf4 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Device Tree Source for the RZ/G2E (R8A774C0) SoC
  *
- * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018-2019 Renesas Electronics Corp.
  */
 
 #include <dt-bindings/clock/r8a774c0-cpg-mssr.h>
                                 <&cpg CPG_CORE R8A774C0_CLK_S3D1C>,
                                 <&scif_clk>;
                        clock-names = "fck", "brg_int", "scif_clk";
-                       dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
-                              <&dmac2 0x5b>, <&dmac2 0x5a>;
-                       dma-names = "tx", "rx", "tx", "rx";
+                       dmas = <&dmac0 0x5b>, <&dmac0 0x5a>;
+                       dma-names = "tx", "rx";
                        power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
                        resets = <&cpg 202>;
                        status = "disabled";
index a69faa60ea4da4bb06a257af39881138a026c6d1..d2ad665fe2d925db040e50d2d9341b5535ddd167 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Device Tree Source for the R-Car E3 (R8A77990) SoC
  *
- * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018-2019 Renesas Electronics Corp.
  */
 
 #include <dt-bindings/clock/r8a77990-cpg-mssr.h>
                                 <&cpg CPG_CORE R8A77990_CLK_S3D1C>,
                                 <&scif_clk>;
                        clock-names = "fck", "brg_int", "scif_clk";
-                       dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
-                              <&dmac2 0x5b>, <&dmac2 0x5a>;
-                       dma-names = "tx", "rx", "tx", "rx";
+                       dmas = <&dmac0 0x5b>, <&dmac0 0x5a>;
+                       dma-names = "tx", "rx";
                        power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
                        resets = <&cpg 202>;
                        status = "disabled";
index 33c44e857247e4a64847f22945c4c06bb7735ecf..0e34354b20927698482fddaf6814483394a18b93 100644 (file)
        snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>;
        snps,reset-active-low;
        snps,reset-delays-us = <0 10000 50000>;
-       tx_delay = <0x25>;
-       rx_delay = <0x11>;
+       tx_delay = <0x24>;
+       rx_delay = <0x18>;
        status = "okay";
 };
 
index 2157a528276bffae23afbaf3152db66292b7817a..79b4d1d4b5d6b67672dcbab1de19d274cecd5c5b 100644 (file)
@@ -46,8 +46,7 @@
 
        vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator {
                compatible = "regulator-fixed";
-               enable-active-high;
-               gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>;
+               gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>;
                pinctrl-names = "default";
                pinctrl-0 = <&usb20_host_drv>;
                regulator-name = "vcc_host1_5v";
index 84f14b132e8f5fb80bf3f178a72f5e138d144bd3..dabef1a21649ba44ee4b880d83d9b24591ac1d9d 100644 (file)
 
                sdmmc0 {
                        sdmmc0_clk: sdmmc0-clk {
-                               rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_4ma>;
+                               rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_8ma>;
                        };
 
                        sdmmc0_cmd: sdmmc0-cmd {
-                               rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_4ma>;
+                               rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_8ma>;
                        };
 
                        sdmmc0_dectn: sdmmc0-dectn {
                        };
 
                        sdmmc0_bus1: sdmmc0-bus1 {
-                               rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>;
+                               rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>;
                        };
 
                        sdmmc0_bus4: sdmmc0-bus4 {
-                               rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>,
-                                               <1 RK_PA1 1 &pcfg_pull_up_4ma>,
-                                               <1 RK_PA2 1 &pcfg_pull_up_4ma>,
-                                               <1 RK_PA3 1 &pcfg_pull_up_4ma>;
+                               rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>,
+                                               <1 RK_PA1 1 &pcfg_pull_up_8ma>,
+                                               <1 RK_PA2 1 &pcfg_pull_up_8ma>,
+                                               <1 RK_PA3 1 &pcfg_pull_up_8ma>;
                        };
 
                        sdmmc0_gpio: sdmmc0-gpio {
                        rgmiim1_pins: rgmiim1-pins {
                                rockchip,pins =
                                        /* mac_txclk */
-                                       <1 RK_PB4 2 &pcfg_pull_none_12ma>,
+                                       <1 RK_PB4 2 &pcfg_pull_none_8ma>,
                                        /* mac_rxclk */
-                                       <1 RK_PB5 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PB5 2 &pcfg_pull_none_4ma>,
                                        /* mac_mdio */
-                                       <1 RK_PC3 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PC3 2 &pcfg_pull_none_4ma>,
                                        /* mac_txen */
-                                       <1 RK_PD1 2 &pcfg_pull_none_12ma>,
+                                       <1 RK_PD1 2 &pcfg_pull_none_8ma>,
                                        /* mac_clk */
-                                       <1 RK_PC5 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PC5 2 &pcfg_pull_none_4ma>,
                                        /* mac_rxdv */
-                                       <1 RK_PC6 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PC6 2 &pcfg_pull_none_4ma>,
                                        /* mac_mdc */
-                                       <1 RK_PC7 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PC7 2 &pcfg_pull_none_4ma>,
                                        /* mac_rxd1 */
-                                       <1 RK_PB2 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PB2 2 &pcfg_pull_none_4ma>,
                                        /* mac_rxd0 */
-                                       <1 RK_PB3 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PB3 2 &pcfg_pull_none_4ma>,
                                        /* mac_txd1 */
-                                       <1 RK_PB0 2 &pcfg_pull_none_12ma>,
+                                       <1 RK_PB0 2 &pcfg_pull_none_8ma>,
                                        /* mac_txd0 */
-                                       <1 RK_PB1 2 &pcfg_pull_none_12ma>,
+                                       <1 RK_PB1 2 &pcfg_pull_none_8ma>,
                                        /* mac_rxd3 */
-                                       <1 RK_PB6 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PB6 2 &pcfg_pull_none_4ma>,
                                        /* mac_rxd2 */
-                                       <1 RK_PB7 2 &pcfg_pull_none_2ma>,
+                                       <1 RK_PB7 2 &pcfg_pull_none_4ma>,
                                        /* mac_txd3 */
-                                       <1 RK_PC0 2 &pcfg_pull_none_12ma>,
+                                       <1 RK_PC0 2 &pcfg_pull_none_8ma>,
                                        /* mac_txd2 */
-                                       <1 RK_PC1 2 &pcfg_pull_none_12ma>,
+                                       <1 RK_PC1 2 &pcfg_pull_none_8ma>,
 
                                        /* mac_txclk */
-                                       <0 RK_PB0 1 &pcfg_pull_none>,
+                                       <0 RK_PB0 1 &pcfg_pull_none_8ma>,
                                        /* mac_txen */
-                                       <0 RK_PB4 1 &pcfg_pull_none>,
+                                       <0 RK_PB4 1 &pcfg_pull_none_8ma>,
                                        /* mac_clk */
-                                       <0 RK_PD0 1 &pcfg_pull_none>,
+                                       <0 RK_PD0 1 &pcfg_pull_none_4ma>,
                                        /* mac_txd1 */
-                                       <0 RK_PC0 1 &pcfg_pull_none>,
+                                       <0 RK_PC0 1 &pcfg_pull_none_8ma>,
                                        /* mac_txd0 */
-                                       <0 RK_PC1 1 &pcfg_pull_none>,
+                                       <0 RK_PC1 1 &pcfg_pull_none_8ma>,
                                        /* mac_txd3 */
-                                       <0 RK_PC7 1 &pcfg_pull_none>,
+                                       <0 RK_PC7 1 &pcfg_pull_none_8ma>,
                                        /* mac_txd2 */
-                                       <0 RK_PC6 1 &pcfg_pull_none>;
+                                       <0 RK_PC6 1 &pcfg_pull_none_8ma>;
                        };
 
                        rmiim1_pins: rmiim1-pins {
index 4a543f2117d4212b9e26578a64db9ad982ff5c59..844eac939a97c58f9aea4a2e681b39dd6648f4f1 100644 (file)
 };
 
 &hdmi {
+       ddc-i2c-bus = <&i2c3>;
        pinctrl-names = "default";
        pinctrl-0 = <&hdmi_cec>;
        status = "okay";
index 1e17ea5c372b2782cb11bde1a8cfb9162bb4e9e8..60a933b070019a9d8cd43a9d779d92e802b1c08b 100644 (file)
@@ -16,7 +16,6 @@ generic-y += mm-arch-hooks.h
 generic-y += msi.h
 generic-y += qrwlock.h
 generic-y += qspinlock.h
-generic-y += rwsem.h
 generic-y += segment.h
 generic-y += serial.h
 generic-y += set_memory.h
index 2afb1338b48a482c4a528a96ebda0ffb310fb987..5f1437099b9979ac983ae0896272229e2b04f1e3 100644 (file)
@@ -77,6 +77,7 @@
 #define ARM_CPU_IMP_QCOM               0x51
 #define ARM_CPU_IMP_NVIDIA             0x4E
 #define ARM_CPU_IMP_FUJITSU            0x46
+#define ARM_CPU_IMP_HISI               0x48
 
 #define ARM_CPU_PART_AEM_V8            0xD0F
 #define ARM_CPU_PART_FOUNDATION                0xD00
 
 #define FUJITSU_CPU_PART_A64FX         0x001
 
+#define HISI_CPU_PART_TSV110           0xD01
+
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
 #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
 #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
 #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
 #define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
+#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
 
 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
 #define MIDR_FUJITSU_ERRATUM_010001            MIDR_FUJITSU_A64FX
-#define MIDR_FUJITSU_ERRATUM_010001_MASK       (~MIDR_VARIANT(1))
+#define MIDR_FUJITSU_ERRATUM_010001_MASK       (~MIDR_CPU_VAR_REV(1, 0))
 #define TCR_CLEAR_FUJITSU_ERRATUM_010001       (TCR_NFD1 | TCR_NFD0)
 
 #ifndef __ASSEMBLY__
index cccb83ad7fa8ea2e1f4251dd724edc62c754771b..c7e1a7837706c17eeffd96edd17bcc4da0009af2 100644 (file)
@@ -30,8 +30,8 @@ do {                                                                  \
 "      prfm    pstl1strm, %2\n"                                        \
 "1:    ldxr    %w1, %2\n"                                              \
        insn "\n"                                                       \
-"2:    stlxr   %w3, %w0, %2\n"                                         \
-"      cbnz    %w3, 1b\n"                                              \
+"2:    stlxr   %w0, %w3, %2\n"                                         \
+"      cbnz    %w0, 1b\n"                                              \
 "      dmb     ish\n"                                                  \
 "3:\n"                                                                 \
 "      .pushsection .fixup,\"ax\"\n"                                   \
@@ -57,23 +57,23 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
 
        switch (op) {
        case FUTEX_OP_SET:
-               __futex_atomic_op("mov  %w0, %w4",
+               __futex_atomic_op("mov  %w3, %w4",
                                  ret, oldval, uaddr, tmp, oparg);
                break;
        case FUTEX_OP_ADD:
-               __futex_atomic_op("add  %w0, %w1, %w4",
+               __futex_atomic_op("add  %w3, %w1, %w4",
                                  ret, oldval, uaddr, tmp, oparg);
                break;
        case FUTEX_OP_OR:
-               __futex_atomic_op("orr  %w0, %w1, %w4",
+               __futex_atomic_op("orr  %w3, %w1, %w4",
                                  ret, oldval, uaddr, tmp, oparg);
                break;
        case FUTEX_OP_ANDN:
-               __futex_atomic_op("and  %w0, %w1, %w4",
+               __futex_atomic_op("and  %w3, %w1, %w4",
                                  ret, oldval, uaddr, tmp, ~oparg);
                break;
        case FUTEX_OP_XOR:
-               __futex_atomic_op("eor  %w0, %w1, %w4",
+               __futex_atomic_op("eor  %w3, %w1, %w4",
                                  ret, oldval, uaddr, tmp, oparg);
                break;
        default:
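
The operand swap on the stlxr line is the whole fix: previously %w0 (the ret output) held the computed new value while %w3 (tmp) held the store-exclusive status, so after a successful store ret still contained the operation's result and any op computing a non-zero value was reported back as an error. Swapped, %w0 carries only the status (0 on success) and tmp carries the value, which is why every op template above now writes %w3 instead of %w0. The loop's effect as C pseudocode (ldxr()/stlxr() stand in for the exclusive-access instructions):

        do {
                oldval = ldxr(uaddr);        /* %w1: exclusive load         */
                tmp    = op(oldval, oparg);  /* %w3: computed new value     */
                ret    = stlxr(uaddr, tmp);  /* %w0: 0 = stored, 1 = retry  */
        } while (ret);                       /* cbnz %w0, 1b                */
        *oval = oldval;                      /* ret stays 0 on success      */
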
index b0742a16c6c9e43ca73888c2c9778042174328a2..ebeefcf835e8d7f65040fe5eed38ba4693a1f67b 100644 (file)
@@ -445,6 +445,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
        return ret;
 }
 
+static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
+                                      const void *data, unsigned long len)
+{
+       int srcu_idx = srcu_read_lock(&kvm->srcu);
+       int ret = kvm_write_guest(kvm, gpa, data, len);
+
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+       return ret;
+}
+
 #ifdef CONFIG_KVM_INDIRECT_VECTORS
 /*
  * EL2 vectors can be mapped and rerouted in a number of ways,
index 905e1bb0e7bd023b7174da7a6a81459df87b40dc..cd9f4e9d04d3be6564843e821b2a612642717210 100644 (file)
@@ -73,4 +73,9 @@ static inline bool is_forbidden_offset_for_adrp(void *place)
 struct plt_entry get_plt_entry(u64 dst, void *pc);
 bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b);
 
+static inline bool plt_entry_is_initialized(const struct plt_entry *e)
+{
+       return e->adrp || e->add || e->br;
+}
+
 #endif /* __ASM_MODULE_H */
index ad8be16a39c9d18bdbd406f522c02432529c4cf6..a179df3674a1aa207dfdead37e47219353b67b91 100644 (file)
@@ -65,52 +65,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       if (n == 0)
-               return;
-
-       if (i + n > SYSCALL_MAX_ARGS) {
-               unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
-               unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
-               pr_warning("%s called with max args %d, handling only %d\n",
-                          __func__, i + n, SYSCALL_MAX_ARGS);
-               memset(args_bad, 0, n_bad * sizeof(args[0]));
-       }
-
-       if (i == 0) {
-               args[0] = regs->orig_x0;
-               args++;
-               i++;
-               n--;
-       }
-
-       memcpy(args, &regs->regs[i], n * sizeof(args[0]));
+       args[0] = regs->orig_x0;
+       args++;
+
+       memcpy(args, &regs->regs[1], 5 * sizeof(args[0]));
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       if (n == 0)
-               return;
-
-       if (i + n > SYSCALL_MAX_ARGS) {
-               pr_warning("%s called with max args %d, handling only %d\n",
-                          __func__, i + n, SYSCALL_MAX_ARGS);
-               n = SYSCALL_MAX_ARGS - i;
-       }
-
-       if (i == 0) {
-               regs->orig_x0 = args[0];
-               args++;
-               i++;
-               n--;
-       }
-
-       memcpy(&regs->regs[i], args, n * sizeof(args[0]));
+       regs->orig_x0 = args[0];
+       args++;
+
+       memcpy(&regs->regs[1], args, 5 * sizeof(args[0]));
 }
 
 /*
index 106fdc951b6eefdda0a97c877c2493b7bdfac1f8..37603b5616a588e514da402c49a85029d47c74dd 100644 (file)
@@ -27,6 +27,7 @@ static inline void __tlb_remove_table(void *_table)
        free_page_and_swap_cache((struct page *)_table);
 }
 
+#define tlb_flush tlb_flush
 static void tlb_flush(struct mmu_gather *tlb);
 
 #include <asm-generic/tlb.h>
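
The seemingly redundant "#define tlb_flush tlb_flush" is how an architecture opts out of the reworked generic header's default: asm-generic/tlb.h only provides its own tlb_flush() when the name is not already defined, so defining the macro to itself signals that arm64 supplies the implementation declared on the next line. In outline (paraphrasing the generic header, not arm64 code):

        /* asm-generic/tlb.h, in outline */
        #ifndef tlb_flush
        static inline void tlb_flush(struct mmu_gather *tlb)
        {
                /* generic fallback driven by tlb->start and tlb->end */
        }
        #endif
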
index d1dd93436e1eedad0ea3cf83ba1cdc6b3fd50c22..f2a83ff6b73c2414110c02dc14aa24686d6ada9c 100644 (file)
@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE + 5)
 #define __ARM_NR_COMPAT_END            (__ARM_NR_COMPAT_BASE + 0x800)
 
-#define __NR_compat_syscalls           424
+#define __NR_compat_syscalls           428
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
index 5590f262369079bca3b66561a51e9b3f4705cdd7..23f1a44acada413fb4e2ad5411624d2925c71835 100644 (file)
@@ -866,6 +866,14 @@ __SYSCALL(__NR_rt_sigtimedwait_time64, compat_sys_rt_sigtimedwait_time64)
 __SYSCALL(__NR_futex_time64, sys_futex)
 #define __NR_sched_rr_get_interval_time64 423
 __SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval)
+#define __NR_pidfd_send_signal 424
+__SYSCALL(__NR_pidfd_send_signal, sys_pidfd_send_signal)
+#define __NR_io_uring_setup 425
+__SYSCALL(__NR_io_uring_setup, sys_io_uring_setup)
+#define __NR_io_uring_enter 426
+__SYSCALL(__NR_io_uring_enter, sys_io_uring_enter)
+#define __NR_io_uring_register 427
+__SYSCALL(__NR_io_uring_register, sys_io_uring_register)
 
 /*
  * Please add new compat syscalls above this comment and update
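
These four entries pair with the __NR_compat_syscalls bump from 424 to 428 in the unistd.h hunk just above: the count must always sit one past the highest wired-up number, and 427 (io_uring_register) + 1 = 428. They also mirror the same four additions to 32-bit ARM's syscall.tbl earlier in this diff. A hypothetical compile-time guard for the invariant (illustrative only, not part of the patch):

        static_assert(__NR_io_uring_register + 1 == __NR_compat_syscalls,
                      "compat syscall count must be one past the last entry");
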
index e24e94d2876717b8ef3ac0311aa419049255134e..4061de10cea6ccb0ccaa890c4f2dd98d4a2bcd91 100644 (file)
@@ -963,6 +963,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+               MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
                { /* sentinel */ }
        };
        char const *str = "command line option";
index 8e4431a8821f5920e49910dd287db5882e73f330..65a51331088eb0afd0db70e52fb03335cc8151dc 100644 (file)
@@ -103,12 +103,16 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
                 * to be revisited if support for multiple ftrace entry points
                 * is added in the future, but for now, the pr_err() below
                 * deals with a theoretical issue only.
+                *
+                * Note that PLTs are place relative, and plt_entries_equal()
+                * checks whether they point to the same target. Here, we need
+                * to check if the actual opcodes are in fact identical,
+                * regardless of the offset in memory, so use memcmp() instead.
                 */
                trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
-               if (!plt_entries_equal(mod->arch.ftrace_trampoline,
-                                      &trampoline)) {
-                       if (!plt_entries_equal(mod->arch.ftrace_trampoline,
-                                              &(struct plt_entry){})) {
+               if (memcmp(mod->arch.ftrace_trampoline, &trampoline,
+                          sizeof(trampoline))) {
+                       if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
                                pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
                                return -EINVAL;
                        }
index 7fb6f3aa5ceb7172c91277067da624085b158433..7a679caf45856e75c21860aaa7522c080fb48e41 100644 (file)
@@ -91,8 +91,6 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
        unsigned long probe_addr = (unsigned long)p->addr;
-       extern char __start_rodata[];
-       extern char __end_rodata[];
 
        if (probe_addr & 0x3)
                return -EINVAL;
@@ -100,10 +98,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
        /* copy instruction */
        p->opcode = le32_to_cpu(*p->addr);
 
-       if (in_exception_text(probe_addr))
-               return -EINVAL;
-       if (probe_addr >= (unsigned long) __start_rodata &&
-           probe_addr <= (unsigned long) __end_rodata)
+       if (search_exception_tables(probe_addr))
                return -EINVAL;
 
        /* decode instruction */
@@ -476,26 +471,37 @@ kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
        return DBG_HOOK_HANDLED;
 }
 
-bool arch_within_kprobe_blacklist(unsigned long addr)
+/*
+ * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
+ * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
+ */
+int __init arch_populate_kprobe_blacklist(void)
 {
-       if ((addr >= (unsigned long)__kprobes_text_start &&
-           addr < (unsigned long)__kprobes_text_end) ||
-           (addr >= (unsigned long)__entry_text_start &&
-           addr < (unsigned long)__entry_text_end) ||
-           (addr >= (unsigned long)__idmap_text_start &&
-           addr < (unsigned long)__idmap_text_end) ||
-           (addr >= (unsigned long)__hyp_text_start &&
-           addr < (unsigned long)__hyp_text_end) ||
-           !!search_exception_tables(addr))
-               return true;
-
-       if (!is_kernel_in_hyp_mode()) {
-               if ((addr >= (unsigned long)__hyp_idmap_text_start &&
-                   addr < (unsigned long)__hyp_idmap_text_end))
-                       return true;
-       }
-
-       return false;
+       int ret;
+
+       ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start,
+                                       (unsigned long)__entry_text_end);
+       if (ret)
+               return ret;
+       ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
+                                       (unsigned long)__irqentry_text_end);
+       if (ret)
+               return ret;
+       ret = kprobe_add_area_blacklist((unsigned long)__exception_text_start,
+                                       (unsigned long)__exception_text_end);
+       if (ret)
+               return ret;
+       ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start,
+                                       (unsigned long)__idmap_text_end);
+       if (ret)
+               return ret;
+       ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
+                                       (unsigned long)__hyp_text_end);
+       if (ret || is_kernel_in_hyp_mode())
+               return ret;
+       ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start,
+                                       (unsigned long)__hyp_idmap_text_end);
+       return ret;
 }
 
 void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
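
The new arch_populate_kprobe_blacklist() registers one text range per call and stops at the first failure; the hyp-idmap range is skipped when the kernel already runs at EL2 (the `ret || is_kernel_in_hyp_mode()` early return), matching the condition the deleted predicate used. A table-driven sketch of the same registration pattern, using only the kprobe_add_area_blacklist() helper visible in the hunk (populate_blacklist_sketch is an invented name, not kernel code):

static int __init populate_blacklist_sketch(void)
{
	/* section-bound symbols, as in the patch (extern char ...[]) */
	const struct { char *s, *e; } areas[] = {
		{ __entry_text_start,     __entry_text_end     },
		{ __irqentry_text_start,  __irqentry_text_end  },
		{ __exception_text_start, __exception_text_end },
		{ __idmap_text_start,     __idmap_text_end     },
		{ __hyp_text_start,       __hyp_text_end       },
	};
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(areas); i++) {
		ret = kprobe_add_area_blacklist((unsigned long)areas[i].s,
						(unsigned long)areas[i].e);
		if (ret)
			return ret;
	}

	/* the hyp idmap only needs blacklisting when not running at EL2 */
	if (!is_kernel_in_hyp_mode())
		ret = kprobe_add_area_blacklist(
				(unsigned long)__hyp_idmap_text_start,
				(unsigned long)__hyp_idmap_text_end);
	return ret;
}
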
index 5ba4465e44f09028c89fb190b7d65927635a9d10..ea94cf8f9dc6d15f58a7c8e298eba6d8bfdecede 100644 (file)
@@ -94,6 +94,9 @@ static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
        unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
        unsigned long high = low + SDEI_STACK_SIZE;
 
+       if (!low)
+               return false;
+
        if (sp < low || sp >= high)
                return false;
 
@@ -111,6 +114,9 @@ static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
        unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
        unsigned long high = low + SDEI_STACK_SIZE;
 
+       if (!low)
+               return false;
+
        if (sp < low || sp >= high)
                return false;
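
Both SDEI stack checks gain the same guard: the per-CPU stack pointers stay NULL until the stacks are allocated, and with low == 0 the range test degenerates to sp < SDEI_STACK_SIZE, so any small stack pointer value would be misattributed to an SDEI stack. The guarded check, reduced to its essentials (names are illustrative, not the kernel's):

static bool on_stack_sketch(unsigned long sp, unsigned long base,
			    unsigned long size)
{
	/* an unallocated stack reads back as a NULL base: never match */
	if (!base)
		return false;

	return sp >= base && sp < base + size;
}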
 
index f8482fe5a190f47937ee188aa7ff3cbf67bcf2a2..413d566405d175ee882fc4f29a017a6fd39ce0b6 100644 (file)
@@ -217,7 +217,7 @@ static void __init request_standard_resources(void)
 
        num_standard_resources = memblock.memory.cnt;
        res_size = num_standard_resources * sizeof(*standard_resources);
-       standard_resources = memblock_alloc_low(res_size, SMP_CACHE_BYTES);
+       standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
        if (!standard_resources)
                panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);
 
index 1a29f2695ff24849304ad1f2106203b5114c5071..b00ec7d483d1c33b848e869885067551da214252 100644 (file)
@@ -140,9 +140,8 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
 #endif
 
        walk_stackframe(current, &frame, save_trace, &data);
-       if (trace->nr_entries < trace->max_entries)
-               trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
+EXPORT_SYMBOL_GPL(save_stack_trace_regs);
 
 static noinline void __save_stack_trace(struct task_struct *tsk,
        struct stack_trace *trace, unsigned int nosched)
@@ -171,8 +170,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
 #endif
 
        walk_stackframe(tsk, &frame, save_trace, &data);
-       if (trace->nr_entries < trace->max_entries)
-               trace->entries[trace->nr_entries++] = ULONG_MAX;
 
        put_task_stack(tsk);
 }
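
Dropping the trailing ULONG_MAX entries tracks the contemporaneous cleanup of the generic stacktrace code, which bounds consumers by trace->nr_entries rather than a sentinel value; the first hunk also exports save_stack_trace_regs() for modular users. A sketch of the consumer pattern this presumes (print_trace_sketch is an invented name):

static void print_trace_sketch(struct stack_trace *trace)
{
	unsigned int i;

	/* iterate by count; no ULONG_MAX end marker is expected */
	for (i = 0; i < trace->nr_entries; i++)
		printk("  %pS\n", (void *)trace->entries[i]);
}
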
index 8ad119c3f665d4e8001038ccf3bd6dcb62e2e224..29755989f616c187481803b27ca9dbfcf0a7847b 100644 (file)
@@ -102,10 +102,16 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
 void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 {
        struct stackframe frame;
-       int skip;
+       int skip = 0;
 
        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
 
+       if (regs) {
+               if (user_mode(regs))
+                       return;
+               skip = 1;
+       }
+
        if (!tsk)
                tsk = current;
 
@@ -126,7 +132,6 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
        frame.graph = 0;
 #endif
 
-       skip = !!regs;
        printk("Call trace:\n");
        do {
                /* skip until specified stack frame */
@@ -176,15 +181,13 @@ static int __die(const char *str, int err, struct pt_regs *regs)
                return ret;
 
        print_modules();
-       __show_regs(regs);
        pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
                 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
                 end_of_stack(tsk));
+       show_regs(regs);
 
-       if (!user_mode(regs)) {
-               dump_backtrace(regs, tsk);
+       if (!user_mode(regs))
                dump_instr(KERN_EMERG, regs);
-       }
 
        return ret;
 }
index f16a5f8ff2b41fa4284da58d1d2caf7af7b46e7b..e2a0500cd7a27c9ecc5326dd2380f23917309f91 100644 (file)
@@ -123,6 +123,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
        int ret = -EINVAL;
        bool loaded;
 
+       /* Reset PMU outside of the non-preemptible section */
+       kvm_pmu_vcpu_reset(vcpu);
+
        preempt_disable();
        loaded = (vcpu->cpu != -1);
        if (loaded)
@@ -170,9 +173,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
                vcpu->arch.reset_state.reset = false;
        }
 
-       /* Reset PMU */
-       kvm_pmu_vcpu_reset(vcpu);
-
        /* Default workaround setup is enabled (if supported) */
        if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
                vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
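
Per the added comment, the PMU reset moves ahead of preempt_disable() so that whatever work it does no longer runs inside the non-preemptible window; only state that must not migrate between CPUs stays there. The shape of the reordering, sketched (reset_vcpu_sketch is illustrative, not the patch itself):

static int reset_vcpu_sketch(struct kvm_vcpu *vcpu)
{
	/* potentially heavyweight, preemption-friendly work first */
	kvm_pmu_vcpu_reset(vcpu);

	preempt_disable();
	/* ... strictly per-CPU state manipulation only ... */
	preempt_enable();

	return 0;
}
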
index 6bc135042f5e4dc244dbf14e8ea953121931ad2b..7cae155e81a5fb71aa8148865e44d9482bfb5b9a 100644 (file)
@@ -363,7 +363,7 @@ void __init arm64_memblock_init(void)
                 * Otherwise, this is a no-op
                 */
                u64 base = phys_initrd_start & PAGE_MASK;
-               u64 size = PAGE_ALIGN(phys_initrd_size);
+               u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;
 
                /*
                 * We can only add back the initrd memory if we don't end up
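
The size fix matters when phys_initrd_start is not page aligned: aligning only the size can leave the initrd's tail outside the region handed back to memblock. A worked example with PAGE_SIZE == 0x1000 (the values are invented for illustration):

/*
 *   phys_initrd_start = 0x2800, phys_initrd_size = 0x1000
 *   base     = 0x2800 & PAGE_MASK                   = 0x2000
 *   old size = PAGE_ALIGN(0x1000)                   = 0x1000  -> [0x2000, 0x3000)
 *   new size = PAGE_ALIGN(0x2800 + 0x1000) - 0x2000 = 0x2000  -> [0x2000, 0x4000)
 *
 * The old bounds miss the initrd bytes in [0x3000, 0x3800).
 */
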
index e5cd3c5f8399ce1cb055315083db5b2f4873a1fe..eeb0471268a079994b843310de718f745624acb2 100644 (file)
@@ -20,6 +20,7 @@ config C6X
        select GENERIC_CLOCKEVENTS
        select MODULES_USE_ELF_RELA
        select ARCH_NO_COHERENT_DMA_MMAP
+       select MMU_GATHER_NO_RANGE if MMU
 
 config MMU
        def_bool n
@@ -27,9 +28,6 @@ config MMU
 config FPU
        def_bool n
 
-config RWSEM_GENERIC_SPINLOCK
-       def_bool y
-
 config GENERIC_CALIBRATE_DELAY
        def_bool y
 
index 63b4a170518220397d22ec86cf9911f470139b97..249c9f6f26dce7c2dd2a43f9a9f20bd0d29478f3 100644 (file)
@@ -19,6 +19,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
index ae2be315ee9c98d8440f4409be8b67cc261f8858..15ba8599858e6be5a860e23f338c8bd292da7207 100644 (file)
@@ -46,78 +46,27 @@ static inline void syscall_set_return_value(struct task_struct *task,
 }
 
 static inline void syscall_get_arguments(struct task_struct *task,
-                                        struct pt_regs *regs, unsigned int i,
-                                        unsigned int n, unsigned long *args)
+                                        struct pt_regs *regs,
+                                        unsigned long *args)
 {
-       switch (i) {
-       case 0:
-               if (!n--)
-                       break;
-               *args++ = regs->a4;
-       case 1:
-               if (!n--)
-                       break;
-               *args++ = regs->b4;
-       case 2:
-               if (!n--)
-                       break;
-               *args++ = regs->a6;
-       case 3:
-               if (!n--)
-                       break;
-               *args++ = regs->b6;
-       case 4:
-               if (!n--)
-                       break;
-               *args++ = regs->a8;
-       case 5:
-               if (!n--)
-                       break;
-               *args++ = regs->b8;
-       case 6:
-               if (!n--)
-                       break;
-       default:
-               BUG();
-       }
+       *args++ = regs->a4;
+       *args++ = regs->b4;
+       *args++ = regs->a6;
+       *args++ = regs->b6;
+       *args++ = regs->a8;
+       *args   = regs->b8;
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       switch (i) {
-       case 0:
-               if (!n--)
-                       break;
-               regs->a4 = *args++;
-       case 1:
-               if (!n--)
-                       break;
-               regs->b4 = *args++;
-       case 2:
-               if (!n--)
-                       break;
-               regs->a6 = *args++;
-       case 3:
-               if (!n--)
-                       break;
-               regs->b6 = *args++;
-       case 4:
-               if (!n--)
-                       break;
-               regs->a8 = *args++;
-       case 5:
-               if (!n--)
-                       break;
-               regs->a9 = *args++;
-       case 6:
-               if (!n)
-                       break;
-       default:
-               BUG();
-       }
+       regs->a4 = *args++;
+       regs->b4 = *args++;
+       regs->a6 = *args++;
+       regs->b6 = *args++;
+       regs->a8 = *args++;
+       regs->a9 = *args;
 }
 
 #endif /* __ASM_C6X_SYSCALLS_H */
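
This is one instance of a tree-wide change: the index/count parameters are gone from syscall_get_arguments()/syscall_set_arguments(), every caller now transfers exactly six arguments, and the fall-through switches collapse into straight register copies. (Note that the set side still writes a9 for the sixth argument where the get side reads b8, an asymmetry carried over from the old switch.) A sketch of a tracer-style caller under the new contract:

unsigned long args[6];

/* always fetches all six; on C6x these mirror a4, b4, a6, b6, a8, b8 */
syscall_get_arguments(current, task_pt_regs(current), args);
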
index 34525dea1356645c12a4c3c9b6d1a5f57eec3851..240ba0febb57b7407070df4e52f6ae2c346a42c4 100644 (file)
@@ -2,8 +2,6 @@
 #ifndef _ASM_C6X_TLB_H
 #define _ASM_C6X_TLB_H
 
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
 #include <asm-generic/tlb.h>
 
 #endif /* _ASM_C6X_TLB_H */
index 755bb11323d8feb92d12349bf444f151cd8acc29..1c72f04ff75da1a7f6918f00b14116a183a79313 100644 (file)
@@ -1,2 +1 @@
-generic-y += kvm_para.h
 generic-y += ucontext.h
index 725a115759c97695eec204f2c15ca399eab234ee..6555d178113221412b1c0f3dbd0ff09c63bb99a6 100644 (file)
@@ -92,9 +92,6 @@ config GENERIC_HWEIGHT
 config MMU
        def_bool y
 
-config RWSEM_GENERIC_SPINLOCK
-       def_bool y
-
 config STACKTRACE_SUPPORT
        def_bool y
 
index d637445737b78fd5c78c9994173a1e7c73eb3d1f..bda0a446c63ead759d9360d769fe68647a28f83d 100644 (file)
@@ -43,30 +43,20 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
 
 static inline void
 syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-                     unsigned int i, unsigned int n, unsigned long *args)
+                     unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       if (i == 0) {
-               args[0] = regs->orig_a0;
-               args++;
-               i++;
-               n--;
-       }
-       memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
+       args[0] = regs->orig_a0;
+       args++;
+       memcpy(args, &regs->a1, 5 * sizeof(args[0]));
 }
 
 static inline void
 syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
-                     unsigned int i, unsigned int n, const unsigned long *args)
+                     const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       if (i == 0) {
-               regs->orig_a0 = args[0];
-               args++;
-               i++;
-               n--;
-       }
-       memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
+       regs->orig_a0 = args[0];
+       args++;
+       memcpy(&regs->a1, args, 5 * sizeof(regs->a1));
 }
 
 static inline int
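
Beyond the interface change, the csky rewrite removes an over-scaling bug: in the deleted lines, &regs->a1 + i * sizeof(regs->a1) advances the pointer by i * sizeof(regs->a1) elements rather than i elements, since pointer arithmetic already multiplies by the element size. In code form:

/*
 * unsigned long *p = &regs->a1;
 *
 * p + i                      advances i elements (i * sizeof(*p) bytes)
 * p + i * sizeof(regs->a1)   advances i * sizeof(*p) elements: too far
 *
 * The replacement copies a1..a5 unconditionally and never indexes.
 */
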
index c071da34e0817be181e82ac3d7a9ca7eea85450c..61c01db6c29230ca8b60ffc64d00179ccc579b24 100644 (file)
@@ -27,9 +27,6 @@ config H8300
 config CPU_BIG_ENDIAN
        def_bool y
 
-config RWSEM_GENERIC_SPINLOCK
-       def_bool y
-
 config GENERIC_HWEIGHT
        def_bool y
 
index 3e7c8ecf151e13841d0c3e491f3927aa1586434c..e3dead402e5fbe94ebe53063968801c8f51360b5 100644 (file)
@@ -23,6 +23,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += linkage.h
 generic-y += local.h
 generic-y += local64.h
index 924990401237126585ea8fd105e4b57e8f9e5b24..ddd483c6ca95c9df50e9ed7b8c820c9884afcbeb 100644 (file)
@@ -17,34 +17,14 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
 
 static inline void
 syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-                     unsigned int i, unsigned int n, unsigned long *args)
+                     unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       while (n > 0) {
-               switch (i) {
-               case 0:
-                       *args++ = regs->er1;
-                       break;
-               case 1:
-                       *args++ = regs->er2;
-                       break;
-               case 2:
-                       *args++ = regs->er3;
-                       break;
-               case 3:
-                       *args++ = regs->er4;
-                       break;
-               case 4:
-                       *args++ = regs->er5;
-                       break;
-               case 5:
-                       *args++ = regs->er6;
-                       break;
-               }
-               i++;
-               n--;
-       }
+       *args++ = regs->er1;
+       *args++ = regs->er2;
+       *args++ = regs->er3;
+       *args++ = regs->er4;
+       *args++ = regs->er5;
+       *args   = regs->er6;
 }
 
 
index 98f344279904a684d367c4d55556c06bab3e74ab..d8201ca312061d96d9409fd27efc87cddf384196 100644 (file)
@@ -2,8 +2,6 @@
 #ifndef __H8300_TLB_H__
 #define __H8300_TLB_H__
 
-#define tlb_flush(tlb) do { } while (0)
-
 #include <asm-generic/tlb.h>
 
 #endif
index 755bb11323d8feb92d12349bf444f151cd8acc29..1c72f04ff75da1a7f6918f00b14116a183a79313 100644 (file)
@@ -1,2 +1 @@
-generic-y += kvm_para.h
 generic-y += ucontext.h
index ac441680dcc06acddcfdf717b1828c2cdc1962d8..3e54a53208d58ad7c7587dcc88920a692c5d2634 100644 (file)
@@ -65,12 +65,6 @@ config GENERIC_CSUM
 config GENERIC_IRQ_PROBE
        def_bool y
 
-config RWSEM_GENERIC_SPINLOCK
-       def_bool n
-
-config RWSEM_XCHGADD_ALGORITHM
-       def_bool y
-
 config GENERIC_HWEIGHT
        def_bool y
 
index b25fd42aa0f47372162decdff321f3cca2e1a4d8..3ff5f297acda7783d4d1494098636ad8e017fb1a 100644 (file)
@@ -19,6 +19,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
@@ -26,7 +27,6 @@ generic-y += mm-arch-hooks.h
 generic-y += pci.h
 generic-y += percpu.h
 generic-y += preempt.h
-generic-y += rwsem.h
 generic-y += sections.h
 generic-y += segment.h
 generic-y += serial.h
index 4af9c7b6f13af9490e4bee7b9f608c7d467cecb3..ae3a1e24fabd7193ff7d3dc142c4ca7d123f56e6 100644 (file)
@@ -37,10 +37,8 @@ static inline long syscall_get_nr(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       memcpy(args, &(&regs->r00)[i], n * sizeof(args[0]));
+       memcpy(args, &(&regs->r00)[0], 6 * sizeof(args[0]));
 }
 #endif
index 2f00772cc08a551df873985b29647ac388fb1e55..f71c4ba83614c38187fd5ca0a5e24bb5ff71749d 100644 (file)
 #include <linux/pagemap.h>
 #include <asm/tlbflush.h>
 
-/*
- * We don't need any special per-pte or per-vma handling...
- */
-#define tlb_start_vma(tlb, vma)                                do { } while (0)
-#define tlb_end_vma(tlb, vma)                          do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address)     do { } while (0)
-
-/*
- * .. because we flush the whole mm when it fills up
- */
-#define tlb_flush(tlb)         flush_tlb_mm((tlb)->mm)
-
 #include <asm-generic/tlb.h>
 
 #endif
diff --git a/arch/hexagon/include/uapi/asm/kvm_para.h b/arch/hexagon/include/uapi/asm/kvm_para.h
deleted file mode 100644 (file)
index baacc49..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/kvm_para.h>
index 8d7396bd1790319eb7fa9a10b671d2b922081b36..73a26f04644e3e27a17c1fc1ab4145f22f6d520c 100644 (file)
@@ -83,10 +83,6 @@ config STACKTRACE_SUPPORT
 config GENERIC_LOCKBREAK
        def_bool n
 
-config RWSEM_XCHGADD_ALGORITHM
-       bool
-       default y
-
 config HUGETLB_PAGE_SIZE_VARIABLE
        bool
        depends on HUGETLB_PAGE
index 43e21fe3499c43451f2915ff1274afa76059f16b..11f191689c9e8445a77ede3555452fb8d008a3d7 100644 (file)
@@ -2,6 +2,7 @@ generated-y += syscall_table.h
 generic-y += compat.h
 generic-y += exec.h
 generic-y += irq_work.h
+generic-y += kvm_para.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += preempt.h
index 5133739966bcfa00570aca667c88d96fe71e771a..beae261fbcb415b2321af6c333bd9ce5e304896f 100644 (file)
@@ -30,7 +30,6 @@ typedef void ia64_mv_irq_init_t (void);
 typedef void ia64_mv_send_ipi_t (int, int, int, int);
 typedef void ia64_mv_timer_interrupt_t (int, void *);
 typedef void ia64_mv_global_tlb_purge_t (struct mm_struct *, unsigned long, unsigned long, unsigned long);
-typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
 typedef u8 ia64_mv_irq_to_vector (int);
 typedef unsigned int ia64_mv_local_vector_to_irq (u8);
 typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
@@ -79,11 +78,6 @@ machvec_noop (void)
 {
 }
 
-static inline void
-machvec_noop_mm (struct mm_struct *mm)
-{
-}
-
 static inline void
 machvec_noop_task (struct task_struct *task)
 {
@@ -96,7 +90,6 @@ machvec_noop_bus (struct pci_bus *bus)
 
 extern void machvec_setup (char **);
 extern void machvec_timer_interrupt (int, void *);
-extern void machvec_tlb_migrate_finish (struct mm_struct *);
 
 # if defined (CONFIG_IA64_HP_SIM)
 #  include <asm/machvec_hpsim.h>
@@ -124,7 +117,6 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 #  define platform_send_ipi    ia64_mv.send_ipi
 #  define platform_timer_interrupt     ia64_mv.timer_interrupt
 #  define platform_global_tlb_purge    ia64_mv.global_tlb_purge
-#  define platform_tlb_migrate_finish  ia64_mv.tlb_migrate_finish
 #  define platform_dma_init            ia64_mv.dma_init
 #  define platform_dma_get_ops         ia64_mv.dma_get_ops
 #  define platform_irq_to_vector       ia64_mv.irq_to_vector
@@ -167,7 +159,6 @@ struct ia64_machine_vector {
        ia64_mv_send_ipi_t *send_ipi;
        ia64_mv_timer_interrupt_t *timer_interrupt;
        ia64_mv_global_tlb_purge_t *global_tlb_purge;
-       ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
        ia64_mv_dma_init *dma_init;
        ia64_mv_dma_get_ops *dma_get_ops;
        ia64_mv_irq_to_vector *irq_to_vector;
@@ -206,7 +197,6 @@ struct ia64_machine_vector {
        platform_send_ipi,                      \
        platform_timer_interrupt,               \
        platform_global_tlb_purge,              \
-       platform_tlb_migrate_finish,            \
        platform_dma_init,                      \
        platform_dma_get_ops,                   \
        platform_irq_to_vector,                 \
@@ -270,9 +260,6 @@ extern const struct dma_map_ops *dma_get_ops(struct device *);
 #ifndef platform_global_tlb_purge
 # define platform_global_tlb_purge     ia64_global_tlb_purge /* default to architected version */
 #endif
-#ifndef platform_tlb_migrate_finish
-# define platform_tlb_migrate_finish   machvec_noop_mm
-#endif
 #ifndef platform_kernel_launch_event
 # define platform_kernel_launch_event  machvec_noop
 #endif
index b5153d300289724622ae936d560b40a94e471500..a243e4fb4877d7416949359ae6b52d59d25d803d 100644 (file)
@@ -34,7 +34,6 @@ extern ia64_mv_irq_init_t sn_irq_init;
 extern ia64_mv_send_ipi_t sn2_send_IPI;
 extern ia64_mv_timer_interrupt_t sn_timer_interrupt;
 extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
-extern ia64_mv_tlb_migrate_finish_t    sn_tlb_migrate_finish;
 extern ia64_mv_irq_to_vector sn_irq_to_vector;
 extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
 extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem;
@@ -77,7 +76,6 @@ extern ia64_mv_pci_fixup_bus_t                sn_pci_fixup_bus;
 #define platform_send_ipi              sn2_send_IPI
 #define platform_timer_interrupt       sn_timer_interrupt
 #define platform_global_tlb_purge       sn2_global_tlb_purge
-#define platform_tlb_migrate_finish    sn_tlb_migrate_finish
 #define platform_pci_fixup             sn_pci_fixup
 #define platform_inb                   __sn_inb
 #define platform_inw                   __sn_inw
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
deleted file mode 100644 (file)
index 9179106..0000000
+++ /dev/null
@@ -1,172 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * R/W semaphores for ia64
- *
- * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
- * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
- * Copyright (C) 2005 Christoph Lameter <cl@linux.com>
- *
- * Based on asm-i386/rwsem.h and other architecture implementation.
- *
- * The MSW of the count is the negated number of active writers and
- * waiting lockers, and the LSW is the total number of active locks.
- *
- * The lock count is initialized to 0 (no active and no waiting lockers).
- *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffffffff00000001 for
- * the case of an uncontended lock. Readers increment by 1 and see a positive
- * value when uncontended, negative if there are writers (and maybe) readers
- * waiting (in which case it goes to sleep).
- */
-
-#ifndef _ASM_IA64_RWSEM_H
-#define _ASM_IA64_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
-#endif
-
-#include <asm/intrinsics.h>
-
-#define RWSEM_UNLOCKED_VALUE           __IA64_UL_CONST(0x0000000000000000)
-#define RWSEM_ACTIVE_BIAS              (1L)
-#define RWSEM_ACTIVE_MASK              (0xffffffffL)
-#define RWSEM_WAITING_BIAS             (-0x100000000L)
-#define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline int
-___down_read (struct rw_semaphore *sem)
-{
-       long result = ia64_fetchadd8_acq((unsigned long *)&sem->count.counter, 1);
-
-       return (result < 0);
-}
-
-static inline void
-__down_read (struct rw_semaphore *sem)
-{
-       if (___down_read(sem))
-               rwsem_down_read_failed(sem);
-}
-
-static inline int
-__down_read_killable (struct rw_semaphore *sem)
-{
-       if (___down_read(sem))
-               if (IS_ERR(rwsem_down_read_failed_killable(sem)))
-                       return -EINTR;
-
-       return 0;
-}
-
-/*
- * lock for writing
- */
-static inline long
-___down_write (struct rw_semaphore *sem)
-{
-       long old, new;
-
-       do {
-               old = atomic_long_read(&sem->count);
-               new = old + RWSEM_ACTIVE_WRITE_BIAS;
-       } while (atomic_long_cmpxchg_acquire(&sem->count, old, new) != old);
-
-       return old;
-}
-
-static inline void
-__down_write (struct rw_semaphore *sem)
-{
-       if (___down_write(sem))
-               rwsem_down_write_failed(sem);
-}
-
-static inline int
-__down_write_killable (struct rw_semaphore *sem)
-{
-       if (___down_write(sem)) {
-               if (IS_ERR(rwsem_down_write_failed_killable(sem)))
-                       return -EINTR;
-       }
-
-       return 0;
-}
-
-/*
- * unlock after reading
- */
-static inline void
-__up_read (struct rw_semaphore *sem)
-{
-       long result = ia64_fetchadd8_rel((unsigned long *)&sem->count.counter, -1);
-
-       if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
-               rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void
-__up_write (struct rw_semaphore *sem)
-{
-       long old, new;
-
-       do {
-               old = atomic_long_read(&sem->count);
-               new = old - RWSEM_ACTIVE_WRITE_BIAS;
-       } while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);
-
-       if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
-               rwsem_wake(sem);
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-static inline int
-__down_read_trylock (struct rw_semaphore *sem)
-{
-       long tmp;
-       while ((tmp = atomic_long_read(&sem->count)) >= 0) {
-               if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp, tmp+1)) {
-                       return 1;
-               }
-       }
-       return 0;
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-static inline int
-__down_write_trylock (struct rw_semaphore *sem)
-{
-       long tmp = atomic_long_cmpxchg_acquire(&sem->count,
-                       RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS);
-       return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void
-__downgrade_write (struct rw_semaphore *sem)
-{
-       long old, new;
-
-       do {
-               old = atomic_long_read(&sem->count);
-               new = old - RWSEM_WAITING_BIAS;
-       } while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);
-
-       if (old < 0)
-               rwsem_downgrade_wake(sem);
-}
-
-#endif /* _ASM_IA64_RWSEM_H */
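
For readers checking the deleted math: the ia64 rwsem packed both counts into one 64-bit word, the low 32 bits counting active holders and the high 32 bits going negative while writers or waiters exist. Worked through with the deleted constants:

/*
 * unlocked                            : 0x0000000000000000
 * one reader  (+ACTIVE_BIAS)          : 0x0000000000000001
 * one writer  (+WAITING_BIAS
 *              +ACTIVE_BIAS)          : 0xffffffff00000001  (negative as s64)
 *
 * RWSEM_ACTIVE_MASK (0xffffffff) extracts the holder count from the
 * low word, which is how __up_read() decided whether rwsem_wake()
 * was needed.
 */

The arch-specific implementation gives way to the generic rwsem code, which is also why the RWSEM_GENERIC_SPINLOCK/RWSEM_XCHGADD_ALGORITHM Kconfig switches disappear across the architectures in this series.
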
index 1d0b875fec44fc0fd8a70876e8ee6191f66f6005..0d9e7fab4a79fddcc63d24c30f002e73d2b91db0 100644 (file)
@@ -59,26 +59,19 @@ static inline void syscall_set_return_value(struct task_struct *task,
 }
 
 extern void ia64_syscall_get_set_arguments(struct task_struct *task,
-       struct pt_regs *regs, unsigned int i, unsigned int n,
-       unsigned long *args, int rw);
+       struct pt_regs *regs, unsigned long *args, int rw);
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);
+       ia64_syscall_get_set_arguments(task, regs, args, 0);
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       ia64_syscall_get_set_arguments(task, regs, i, n, args, 1);
+       ia64_syscall_get_set_arguments(task, regs, args, 1);
 }
 
 static inline int syscall_get_arch(void)
index 516355a774bfe89b2dc8ce6413aa0f3a8e1e71c0..86ec034ba49917bcc2b71b8425ffdafd82ed1cae 100644 (file)
 #include <asm/tlbflush.h>
 #include <asm/machvec.h>
 
-/*
- * If we can't allocate a page to make a big batch of page pointers
- * to work on, then just handle a few from the on-stack structure.
- */
-#define        IA64_GATHER_BUNDLE      8
-
-struct mmu_gather {
-       struct mm_struct        *mm;
-       unsigned int            nr;
-       unsigned int            max;
-       unsigned char           fullmm;         /* non-zero means full mm flush */
-       unsigned char           need_flush;     /* really unmapped some PTEs? */
-       unsigned long           start, end;
-       unsigned long           start_addr;
-       unsigned long           end_addr;
-       struct page             **pages;
-       struct page             *local[IA64_GATHER_BUNDLE];
-};
-
-struct ia64_tr_entry {
-       u64 ifa;
-       u64 itir;
-       u64 pte;
-       u64 rr;
-}; /*Record for tr entry!*/
-
-extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
-extern void ia64_ptr_entry(u64 target_mask, int slot);
-
-extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
-
-/*
- region register macros
-*/
-#define RR_TO_VE(val)   (((val) >> 0) & 0x0000000000000001)
-#define RR_VE(val)     (((val) & 0x0000000000000001) << 0)
-#define RR_VE_MASK     0x0000000000000001L
-#define RR_VE_SHIFT    0
-#define RR_TO_PS(val)  (((val) >> 2) & 0x000000000000003f)
-#define RR_PS(val)     (((val) & 0x000000000000003f) << 2)
-#define RR_PS_MASK     0x00000000000000fcL
-#define RR_PS_SHIFT    2
-#define RR_RID_MASK    0x00000000ffffff00L
-#define RR_TO_RID(val)         ((val >> 8) & 0xffffff)
-
-static inline void
-ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
-       tlb->need_flush = 0;
-
-       if (tlb->fullmm) {
-               /*
-                * Tearing down the entire address space.  This happens both as a result
-                * of exit() and execve().  The latter case necessitates the call to
-                * flush_tlb_mm() here.
-                */
-               flush_tlb_mm(tlb->mm);
-       } else if (unlikely (end - start >= 1024*1024*1024*1024UL
-                            || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
-       {
-               /*
-                * If we flush more than a tera-byte or across regions, we're probably
-                * better off just flushing the entire TLB(s).  This should be very rare
-                * and is not worth optimizing for.
-                */
-               flush_tlb_all();
-       } else {
-               /*
-                * flush_tlb_range() takes a vma instead of a mm pointer because
-                * some architectures want the vm_flags for ITLB/DTLB flush.
-                */
-               struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
-
-               /* flush the address range from the tlb: */
-               flush_tlb_range(&vma, start, end);
-               /* now flush the virt. page-table area mapping the address range: */
-               flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
-       }
-
-}
-
-static inline void
-ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
-       unsigned long i;
-       unsigned int nr;
-
-       /* lastly, release the freed pages */
-       nr = tlb->nr;
-
-       tlb->nr = 0;
-       tlb->start_addr = ~0UL;
-       for (i = 0; i < nr; ++i)
-               free_page_and_swap_cache(tlb->pages[i]);
-}
-
-/*
- * Flush the TLB for address range START to END and, if not in fast mode, release the
- * freed pages that where gathered up to this point.
- */
-static inline void
-ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
-       if (!tlb->need_flush)
-               return;
-       ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
-       ia64_tlb_flush_mmu_free(tlb);
-}
-
-static inline void __tlb_alloc_page(struct mmu_gather *tlb)
-{
-       unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
-
-       if (addr) {
-               tlb->pages = (void *)addr;
-               tlb->max = PAGE_SIZE / sizeof(void *);
-       }
-}
-
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-                       unsigned long start, unsigned long end)
-{
-       tlb->mm = mm;
-       tlb->max = ARRAY_SIZE(tlb->local);
-       tlb->pages = tlb->local;
-       tlb->nr = 0;
-       tlb->fullmm = !(start | (end+1));
-       tlb->start = start;
-       tlb->end = end;
-       tlb->start_addr = ~0UL;
-}
-
-/*
- * Called at the end of the shootdown operation to free up any resources that were
- * collected.
- */
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
-                       unsigned long start, unsigned long end, bool force)
-{
-       if (force)
-               tlb->need_flush = 1;
-       /*
-        * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
-        * tlb->end_addr.
-        */
-       ia64_tlb_flush_mmu(tlb, start, end);
-
-       /* keep the page table cache within bounds */
-       check_pgt_cache();
-
-       if (tlb->pages != tlb->local)
-               free_pages((unsigned long)tlb->pages, 0);
-}
-
-/*
- * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
- * must be delayed until after the TLB has been flushed (see comments at the beginning of
- * this file).
- */
-static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-       tlb->need_flush = 1;
-
-       if (!tlb->nr && tlb->pages == tlb->local)
-               __tlb_alloc_page(tlb);
-
-       tlb->pages[tlb->nr++] = page;
-       VM_WARN_ON(tlb->nr > tlb->max);
-       if (tlb->nr == tlb->max)
-               return true;
-       return false;
-}
-
-static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
-       ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
-}
-
-static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
-       ia64_tlb_flush_mmu_free(tlb);
-}
-
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
-       ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-       if (__tlb_remove_page(tlb, page))
-               tlb_flush_mmu(tlb);
-}
-
-static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
-                                         struct page *page, int page_size)
-{
-       return __tlb_remove_page(tlb, page);
-}
-
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
-                                       struct page *page, int page_size)
-{
-       return tlb_remove_page(tlb, page);
-}
-
-/*
- * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
- * PTE, not just those pointing to (normal) physical memory.
- */
-static inline void
-__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
-{
-       if (tlb->start_addr == ~0UL)
-               tlb->start_addr = address;
-       tlb->end_addr = address + PAGE_SIZE;
-}
-
-#define tlb_migrate_finish(mm) platform_tlb_migrate_finish(mm)
-
-#define tlb_start_vma(tlb, vma)                        do { } while (0)
-#define tlb_end_vma(tlb, vma)                  do { } while (0)
-
-#define tlb_remove_tlb_entry(tlb, ptep, addr)          \
-do {                                                   \
-       tlb->need_flush = 1;                            \
-       __tlb_remove_tlb_entry(tlb, ptep, addr);        \
-} while (0)
-
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)       \
-       tlb_remove_tlb_entry(tlb, ptep, address)
-
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
-                                                    unsigned int page_size)
-{
-}
-
-#define pte_free_tlb(tlb, ptep, address)               \
-do {                                                   \
-       tlb->need_flush = 1;                            \
-       __pte_free_tlb(tlb, ptep, address);             \
-} while (0)
-
-#define pmd_free_tlb(tlb, ptep, address)               \
-do {                                                   \
-       tlb->need_flush = 1;                            \
-       __pmd_free_tlb(tlb, ptep, address);             \
-} while (0)
-
-#define pud_free_tlb(tlb, pudp, address)               \
-do {                                                   \
-       tlb->need_flush = 1;                            \
-       __pud_free_tlb(tlb, pudp, address);             \
-} while (0)
+#include <asm-generic/tlb.h>
 
 #endif /* _ASM_IA64_TLB_H */
index 25e280810f6c423700e4f13a52a936c45dc6682b..ceac10c4d6e2f3e11fd4a7c06fdb47c71dcaf876 100644 (file)
 #include <asm/mmu_context.h>
 #include <asm/page.h>
 
+struct ia64_tr_entry {
+       u64 ifa;
+       u64 itir;
+       u64 pte;
+       u64 rr;
+}; /* Record for tr entry! */
+
+extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
+extern void ia64_ptr_entry(u64 target_mask, int slot);
+extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
+
+/*
+ * Region register macros
+ */
+#define RR_TO_VE(val)   (((val) >> 0) & 0x0000000000000001)
+#define RR_VE(val)     (((val) & 0x0000000000000001) << 0)
+#define RR_VE_MASK     0x0000000000000001L
+#define RR_VE_SHIFT    0
+#define RR_TO_PS(val)  (((val) >> 2) & 0x000000000000003f)
+#define RR_PS(val)     (((val) & 0x000000000000003f) << 2)
+#define RR_PS_MASK     0x00000000000000fcL
+#define RR_PS_SHIFT    2
+#define RR_RID_MASK    0x00000000ffffff00L
+#define RR_TO_RID(val)         ((val >> 8) & 0xffffff)
+
 /*
  * Now for some TLB flushing routines.  This is the kind of stuff that
  * can be very expensive, so try to avoid them whenever possible.
index 20018cb883a90981565284ac1d5d9df51c38e987..62a9522af51e6651f560e06f8d3c3e2602b63f20 100644 (file)
@@ -1,2 +1 @@
 generated-y += unistd_64.h
-generic-y += kvm_para.h
index 6d50ede0ed691ca1899540722e65edb3cf896510..bf9c24d9ce84e66d1519ce7e5aa65330628d221b 100644 (file)
@@ -2179,12 +2179,11 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
 }
 
 void ia64_syscall_get_set_arguments(struct task_struct *task,
-       struct pt_regs *regs, unsigned int i, unsigned int n,
-       unsigned long *args, int rw)
+       struct pt_regs *regs, unsigned long *args, int rw)
 {
        struct syscall_get_set_args data = {
-               .i = i,
-               .n = n,
+               .i = 0,
+               .n = 6,
                .args = args,
                .regs = regs,
                .rw = rw,
index 583a3746d70be85de588b3dc64355cb85b389e61..c9cfa760cd57bfc4c00ce275708e6723422d9769 100644 (file)
@@ -1058,9 +1058,7 @@ check_bugs (void)
 
 static int __init run_dmi_scan(void)
 {
-       dmi_scan_machine();
-       dmi_memdev_walk();
-       dmi_set_dump_stack_arch_desc();
+       dmi_setup();
        return 0;
 }
 core_initcall(run_dmi_scan);
index ab9cda5f6136ad60753de5e725f6a6271ad88e9c..56e3d0b685e19119afc0a3e244ca64c3752aca4e 100644 (file)
 332    common  pkey_free                       sys_pkey_free
 333    common  rseq                            sys_rseq
 # 334 through 423 are reserved to sync up with other architectures
+424    common  pidfd_send_signal               sys_pidfd_send_signal
+425    common  io_uring_setup                  sys_io_uring_setup
+426    common  io_uring_enter                  sys_io_uring_enter
+427    common  io_uring_register               sys_io_uring_register
index 5fc89aabdce1f8be105e8cb1e1805218cf9f77d9..5158bd28de0551588b29ab9ca2f7a76e0a18d409 100644 (file)
@@ -305,8 +305,8 @@ local_flush_tlb_all (void)
        ia64_srlz_i();                  /* srlz.i implies srlz.d */
 }
 
-void
-flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
+static void
+__flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
                 unsigned long end)
 {
        struct mm_struct *mm = vma->vm_mm;
@@ -343,6 +343,25 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
        preempt_enable();
        ia64_srlz_i();                  /* srlz.i implies srlz.d */
 }
+
+void flush_tlb_range(struct vm_area_struct *vma,
+               unsigned long start, unsigned long end)
+{
+       if (unlikely(end - start >= 1024*1024*1024*1024UL
+                       || REGION_NUMBER(start) != REGION_NUMBER(end - 1))) {
+               /*
+                * If we flush more than a tera-byte or across regions, we're
+                * probably better off just flushing the entire TLB(s).  This
+                * should be very rare and is not worth optimizing for.
+                */
+               flush_tlb_all();
+       } else {
+               /* flush the address range from the tlb */
+               __flush_tlb_range(vma, start, end);
+               /* flush the virt. page-table area mapping the addr range */
+               __flush_tlb_range(vma, ia64_thash(start), ia64_thash(end));
+       }
+}
 EXPORT_SYMBOL(flush_tlb_range);
 
 void ia64_tlb_init(void)
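
The exported wrapper keeps the two escape hatches that previously lived in the deleted mmu_gather code: ranges of a terabyte or more, or ranges crossing ia64 regions, fall back to flush_tlb_all(), while ordinary ranges also flush the virtually mapped page-table (thash) window covering them. A sketch of a caller, assuming a valid VMA is in hand:

struct vm_area_struct *vma = find_vma(mm, addr);	/* illustrative lookup */

if (vma && addr >= vma->vm_start)
	flush_tlb_range(vma, vma->vm_start, vma->vm_end);
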
index b73b0ebf82148eac5442a55eeb5f40a3e35897f9..b510f4f17fd4679abf2e0de1fd5191f6f56d5a8f 100644 (file)
@@ -120,13 +120,6 @@ void sn_migrate(struct task_struct *task)
                cpu_relax();
 }
 
-void sn_tlb_migrate_finish(struct mm_struct *mm)
-{
-       /* flush_tlb_mm is inefficient if more than 1 users of mm */
-       if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1)
-               flush_tlb_mm(mm);
-}
-
 static void
 sn2_ipi_flush_all_tlb(struct mm_struct *mm)
 {
index b54206408f91b9693581c9b6a139324a659cf361..735b9679fe6f31a36d5d85fe44a82b7c98985d1c 100644 (file)
@@ -28,17 +28,11 @@ config M68K
        select OLD_SIGSUSPEND3
        select OLD_SIGACTION
        select ARCH_DISCARD_MEMBLOCK
+       select MMU_GATHER_NO_RANGE if MMU
 
 config CPU_BIG_ENDIAN
        def_bool y
 
-config RWSEM_GENERIC_SPINLOCK
-       bool
-       default y
-
-config RWSEM_XCHGADD_ALGORITHM
-       bool
-
 config ARCH_HAS_ILOG2_U32
        bool
 
index 95f8f631c4df08aebc71b25d878fc29e0f89a1ba..2c359d9e80f63fe44468c29b7a48bb4033a9c31a 100644 (file)
@@ -13,6 +13,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
index b4b9efb6f963be8761e0cdc830be3cb5fb3bf3ab..3c81f6adfc8b36b26f53fea65941072707c9778f 100644 (file)
@@ -2,20 +2,6 @@
 #ifndef _M68K_TLB_H
 #define _M68K_TLB_H
 
-/*
- * m68k doesn't need any special per-pte or
- * per-vma handling..
- */
-#define tlb_start_vma(tlb, vma)        do { } while (0)
-#define tlb_end_vma(tlb, vma)  do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address)     do { } while (0)
-
-/*
- * .. because we flush the whole mm when it
- * fills up.
- */
-#define tlb_flush(tlb)         flush_tlb_mm((tlb)->mm)
-
 #include <asm-generic/tlb.h>
 
 #endif /* _M68K_TLB_H */
index 8a7ad40be463656854310b85dbba06d5f9b8e189..7417847dc438e5ff6aff14f04094a1323d6b933f 100644 (file)
@@ -1,2 +1 @@
 generated-y += unistd_32.h
-generic-y += kvm_para.h
index 125c14178979c010648895bd7925f85e5e54b10d..df4ec3ec71d1518bfac752044f7a1eae9291535a 100644 (file)
 421    common  rt_sigtimedwait_time64          sys_rt_sigtimedwait
 422    common  futex_time64                    sys_futex
 423    common  sched_rr_get_interval_time64    sys_sched_rr_get_interval
+424    common  pidfd_send_signal               sys_pidfd_send_signal
+425    common  io_uring_setup                  sys_io_uring_setup
+426    common  io_uring_enter                  sys_io_uring_enter
+427    common  io_uring_register               sys_io_uring_register
index a51b965b3b82359f5feb15aaef1529cdbaa0f32a..adb179f519f950ee79dc1b7a9b2268a8fa2abf97 100644 (file)
@@ -41,6 +41,7 @@ config MICROBLAZE
        select TRACING_SUPPORT
        select VIRT_TO_BUS
        select CPU_NO_EFFICIENT_FFS
+       select MMU_GATHER_NO_RANGE if MMU
 
 # Endianness selection
 choice
@@ -58,15 +59,9 @@ config CPU_LITTLE_ENDIAN
 
 endchoice
 
-config RWSEM_GENERIC_SPINLOCK
-       def_bool y
-
 config ZONE_DMA
        def_bool y
 
-config RWSEM_XCHGADD_ALGORITHM
-       bool
-
 config ARCH_HAS_ILOG2_U32
        def_bool n
 
index 791cc8d54d0a9eff79b5264a74b507fc3233699c..1a8285c3f693990c8a8f3f7d5d37d240b17c4a80 100644 (file)
@@ -17,6 +17,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += linkage.h
 generic-y += local.h
 generic-y += local64.h
index 220decd605a4aded46a99b445e54bf27c4adc821..833d3a53dab30182b586dd364cd323d1db07835a 100644 (file)
@@ -82,18 +82,22 @@ static inline void microblaze_set_syscall_arg(struct pt_regs *regs,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
+       unsigned int i = 0;
+       unsigned int n = 6;
+
        while (n--)
                *args++ = microblaze_get_syscall_arg(regs, i++);
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
+       unsigned int i = 0;
+       unsigned int n = 6;
+
        while (n--)
                microblaze_set_syscall_arg(regs, i++, *args++);
 }
index 99b6ded54849e2327e7593592c14aa73adee1429..628a78ee0a720975f6413cba6d32a9e342812345 100644 (file)
 #ifndef _ASM_MICROBLAZE_TLB_H
 #define _ASM_MICROBLAZE_TLB_H
 
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
 #include <linux/pagemap.h>
-
-#ifdef CONFIG_MMU
-#define tlb_start_vma(tlb, vma)                do { } while (0)
-#define tlb_end_vma(tlb, vma)          do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
-#endif
-
 #include <asm-generic/tlb.h>
 
 #endif /* _ASM_MICROBLAZE_TLB_H */
index 3ce84fbb2678f2194de4e81504494045a00915ed..13f59631c576c6bcd4c50357269fa42c0525e62a 100644 (file)
@@ -1,3 +1,2 @@
 generated-y += unistd_32.h
-generic-y += kvm_para.h
 generic-y += ucontext.h
index 8ee3a8c18498eb591ab9d1fc2b2044d43afa1cd4..4964947732af3e37bd5d651aaad9a3f3ccd39056 100644 (file)
 421    common  rt_sigtimedwait_time64          sys_rt_sigtimedwait
 422    common  futex_time64                    sys_futex
 423    common  sched_rr_get_interval_time64    sys_sched_rr_get_interval
+424    common  pidfd_send_signal               sys_pidfd_send_signal
+425    common  io_uring_setup                  sys_io_uring_setup
+426    common  io_uring_enter                  sys_io_uring_enter
+427    common  io_uring_register               sys_io_uring_register
index 4a5f5b0ee9a9e7d9988321e452bf87a7e13be9ab..b9c48b27162dc111aa6fee50d057f2cc4bee368d 100644 (file)
@@ -1037,13 +1037,6 @@ source "arch/mips/paravirt/Kconfig"
 
 endmenu
 
-config RWSEM_GENERIC_SPINLOCK
-       bool
-       default y
-
-config RWSEM_XCHGADD_ALGORITHM
-       bool
-
 config GENERIC_HWEIGHT
        bool
        default y
index 4a70c5de8c929bad778788db2f6c46b3cc2633a2..25a57895a3a359f6f7ded09785bf6e4cc15ea1db 100644 (file)
@@ -210,12 +210,6 @@ const char *get_system_type(void)
        return ath79_sys_type;
 }
 
-int get_c0_perfcount_int(void)
-{
-       return ATH79_MISC_IRQ(5);
-}
-EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
-
 unsigned int get_c0_compare_int(void)
 {
        return CP0_LEGACY_COMPARE_IRQ;
index 46eddbec8d9fdec090ee273e1e5ba3cd573ba612..0ab95dd431b3c0b33fd400dd631ae183c796d8c2 100644 (file)
@@ -24,6 +24,7 @@ void __init bcm47xx_workarounds(void)
        case BCM47XX_BOARD_NETGEAR_WNR3500L:
                bcm47xx_workarounds_enable_usb_power(12);
                break;
+       case BCM47XX_BOARD_NETGEAR_WNDR3400V2:
        case BCM47XX_BOARD_NETGEAR_WNDR3400_V3:
                bcm47xx_workarounds_enable_usb_power(21);
                break;
index f607888d24838be1c7d1ecd0fe7c20736b7d2ed1..184eb65a6ba71a5bea1e6b39cbe5d389a9764416 100644 (file)
@@ -1,6 +1,10 @@
 # require CONFIG_CPU_MIPS32_R2=y
 
 CONFIG_LEGACY_BOARD_OCELOT=y
+CONFIG_FIT_IMAGE_FDT_OCELOT=y
+
+CONFIG_BRIDGE=y
+CONFIG_GENERIC_PHY=y
 
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
@@ -19,6 +23,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 
 CONFIG_NETDEVICES=y
+CONFIG_NET_SWITCHDEV=y
+CONFIG_NET_DSA=y
 CONFIG_MSCC_OCELOT_SWITCH=y
 CONFIG_MSCC_OCELOT_SWITCH_OCELOT=y
 CONFIG_MDIO_MSCC_MIIM=y
@@ -35,6 +41,8 @@ CONFIG_SPI_DESIGNWARE=y
 CONFIG_SPI_DW_MMIO=y
 CONFIG_SPI_SPIDEV=y
 
+CONFIG_PINCTRL_OCELOT=y
+
 CONFIG_GPIO_SYSFS=y
 
 CONFIG_POWER_RESET=y
index e77672539e8ed8f6744c03d49eaeb20c76d80b78..e4456e450f946d5c9c55b52d78aeee60d3a2a0e2 100644 (file)
 #endif
 
 #ifdef CONFIG_CPU_MICROMIPS
-#define NOP_INSN "nop32"
+#define B_INSN "b32"
 #else
-#define NOP_INSN "nop"
+#define B_INSN "b"
 #endif
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-       asm_volatile_goto("1:\t" NOP_INSN "\n\t"
-               "nop\n\t"
+       asm_volatile_goto("1:\t" B_INSN " 2f\n\t"
+               "2:\tnop\n\t"
                ".pushsection __jump_table,  \"aw\"\n\t"
                WORD_INSN " 1b, %l[l_yes], %0\n\t"
                ".popsection\n\t"
index 6cf8ffb5367ec3fb725aac26c701d0ae5d81923c..a2b4748655df4d1466d037b971855c8046c589e8 100644 (file)
@@ -116,9 +116,10 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
+       unsigned int i = 0;
+       unsigned int n = 6;
        int ret;
 
        /* O32 ABI syscall() */
index b6823b9e94dad0c2f3b13ad867e1153d4f95c789..90f3ad76d9e0b03761ceec49eca71605065d1465 100644 (file)
@@ -5,23 +5,6 @@
 #include <asm/cpu-features.h>
 #include <asm/mipsregs.h>
 
-/*
- * MIPS doesn't need any special per-pte or per-vma handling, except
- * we need to flush cache for area to be unmapped.
- */
-#define tlb_start_vma(tlb, vma)                                        \
-       do {                                                    \
-               if (!tlb->fullmm)                               \
-                       flush_cache_range(vma, vma->vm_start, vma->vm_end); \
-       }  while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-
-/*
- * .. because we flush the whole mm when it fills up.
- */
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
 #define _UNIQUE_ENTRYHI(base, idx)                                     \
                (((base) + ((idx) << (PAGE_SHIFT + 1))) |               \
                 (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
index 6aa49c10f88f7e2073bc2bd4202d02cab6b0f46a..f0ccb5b90ce95b1e927f46ad850560e3745fea74 100644 (file)
 typedef long           __kernel_daddr_t;
 #define __kernel_daddr_t __kernel_daddr_t
 
-#if (_MIPS_SZLONG == 32)
-typedef struct {
-       long    val[2];
-} __kernel_fsid_t;
-#define __kernel_fsid_t __kernel_fsid_t
-#endif
-
 #include <asm-generic/posix_types.h>
 
 #endif /* _ASM_POSIX_TYPES_H */
index 6e574c02e4c3b81137618c97fe9bc176c5a40d52..ea781b29f7f17291d90391c87a8a250772d17a37 100644 (file)
@@ -33,6 +33,7 @@
 #include <asm/processor.h>
 #include <asm/sigcontext.h>
 #include <linux/uaccess.h>
+#include <asm/irq_regs.h>
 
 static struct hard_trap_info {
        unsigned char tt;       /* Trap type code for MIPS R3xxx and R4xxx */
@@ -214,7 +215,7 @@ void kgdb_call_nmi_hook(void *ignored)
        old_fs = get_fs();
        set_fs(KERNEL_DS);
 
-       kgdb_nmicallback(raw_smp_processor_id(), NULL);
+       kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
 
        set_fs(old_fs);
 }
index 0057c910bc2f34de0f518c43d2e234c845db0da1..3a62f80958e170527a93f4058d60f5372d5773ee 100644 (file)
@@ -1419,7 +1419,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
 
                sd.nr = syscall;
                sd.arch = syscall_get_arch();
-               syscall_get_arguments(current, regs, 0, 6, args);
+               syscall_get_arguments(current, regs, args);
                for (i = 0; i < 6; i++)
                        sd.args[i] = args[i];
                sd.instruction_pointer = KSTK_EIP(current);
index f158c5894a9a8760d3c1ec3430617bad976fac97..feb2653490dfe7a744b0eaa41d913297d9392ed5 100644 (file)
@@ -125,7 +125,7 @@ trace_a_syscall:
        subu    t1, v0,  __NR_O32_Linux
        move    a1, v0
        bnez    t1, 1f /* __NR_syscall at offset 0 */
-       lw      a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
+       ld      a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
        .set    pop
 
 1:     jal     syscall_trace_enter
index 15f4117900ee8d8c9285b61dd332b3c1832558ea..9392dfe33f97ec48a74014d3bc49940dafdc944d 100644 (file)
 421    n32     rt_sigtimedwait_time64          compat_sys_rt_sigtimedwait_time64
 422    n32     futex_time64                    sys_futex
 423    n32     sched_rr_get_interval_time64    sys_sched_rr_get_interval
+424    n32     pidfd_send_signal               sys_pidfd_send_signal
+425    n32     io_uring_setup                  sys_io_uring_setup
+426    n32     io_uring_enter                  sys_io_uring_enter
+427    n32     io_uring_register               sys_io_uring_register
index c85502e67b44145420d6638489300aae4388cefe..cd0c8aa21fbacfb7563c39123f0880d2b753a7c2 100644 (file)
 327    n64     rseq                            sys_rseq
 328    n64     io_pgetevents                   sys_io_pgetevents
 # 329 through 423 are reserved to sync up with other architectures
+424    n64     pidfd_send_signal               sys_pidfd_send_signal
+425    n64     io_uring_setup                  sys_io_uring_setup
+426    n64     io_uring_enter                  sys_io_uring_enter
+427    n64     io_uring_register               sys_io_uring_register
index 2e063d0f837e78c3cb566d68374c33e7bcdd9a8e..e849e8ffe4a25b4516cdc748abfa96bb2c918ebe 100644 (file)
 421    o32     rt_sigtimedwait_time64          sys_rt_sigtimedwait             compat_sys_rt_sigtimedwait_time64
 422    o32     futex_time64                    sys_futex                       sys_futex
 423    o32     sched_rr_get_interval_time64    sys_sched_rr_get_interval       sys_sched_rr_get_interval
+424    o32     pidfd_send_signal               sys_pidfd_send_signal
+425    o32     io_uring_setup                  sys_io_uring_setup
+426    o32     io_uring_enter                  sys_io_uring_enter
+427    o32     io_uring_register               sys_io_uring_register
index cb7e9ed7a453cd8982fca4da2b3ee659d52181cb..33ee0d18fb0adc21d952c98fbf77b5ed8c514d6a 100644 (file)
@@ -140,6 +140,13 @@ SECTIONS
        PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
 #endif
 
+#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
+       .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
+               *(.appended_dtb)
+               KEEP(*(.appended_dtb))
+       }
+#endif
+
 #ifdef CONFIG_RELOCATABLE
        . = ALIGN(4);
 
@@ -164,11 +171,6 @@ SECTIONS
        __appended_dtb = .;
        /* leave space for appended DTB */
        . += 0x100000;
-#elif defined(CONFIG_MIPS_ELF_APPENDED_DTB)
-       .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
-               *(.appended_dtb)
-               KEEP(*(.appended_dtb))
-       }
 #endif
        /*
         * Align to 64K in attempt to eliminate holes before the
index 9e33e45aa17c5d6881d6bc8cd5ca3c90d42098d0..b213cecb8e3ac4e76573e334c42cbde7c88636f4 100644 (file)
@@ -103,7 +103,7 @@ static struct irqaction ip6_irqaction = {
 static struct irqaction cascade_irqaction = {
        .handler = no_action,
        .name = "cascade",
-       .flags = IRQF_NO_THREAD,
+       .flags = IRQF_NO_THREAD | IRQF_NO_SUSPEND,
 };
 
 void __init mach_init_irq(void)
index 0effd3cba9a731907920c6f47a1b52321ac7225f..98bf0c222b5fe84c2086a8707172392323829d57 100644 (file)
@@ -186,8 +186,9 @@ enum which_ebpf_reg {
  * separate frame pointer, so BPF_REG_10 relative accesses are
  * adjusted to be $sp relative.
  */
-int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
-                    enum which_ebpf_reg w)
+static int ebpf_to_mips_reg(struct jit_ctx *ctx,
+                           const struct bpf_insn *insn,
+                           enum which_ebpf_reg w)
 {
        int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
                insn->src_reg : insn->dst_reg;
index 710a59764b01c164d3ffae92f18a394224bdc153..a32f843cdbe02299e34bf7f0897ad61f6e23dce5 100644 (file)
@@ -118,7 +118,6 @@ static void shutdown_bridge_irq(struct irq_data *d)
 {
        struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
        struct bridge_controller *bc;
-       int pin = hd->pin;
 
        if (!hd)
                return;
@@ -126,7 +125,7 @@ static void shutdown_bridge_irq(struct irq_data *d)
        disable_hub_irq(d);
 
        bc = hd->bc;
-       bridge_clr(bc, b_int_enable, (1 << pin));
+       bridge_clr(bc, b_int_enable, (1 << hd->pin));
        bridge_read(bc, b_wid_tflush);
 }
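
The dropped local fixes a dereference-before-check: hd->pin was read before the !hd guard, so a NULL chip-data pointer would have faulted ahead of the very test meant to catch it. The anti-pattern, isolated:

/*
 * int pin = hd->pin;      <- dereferences hd ...
 * if (!hd)
 *         return;         <- ... before this guard can help
 *
 * Delaying the use until after the guard, as the hunk does with
 * (1 << hd->pin), restores the check's value.
 */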
 
index addb7f5f52645c75e83025f075d2878efd186d02..55559ca0efe404ce78d7039f9fa09a72df7c0ad3 100644 (file)
@@ -60,9 +60,6 @@ config GENERIC_LOCKBREAK
         def_bool y
        depends on PREEMPT
 
-config RWSEM_GENERIC_SPINLOCK
-       def_bool y
-
 config TRACE_IRQFLAGS_SUPPORT
        def_bool y
 
index f7e5e86765fe8efe51283d7c350a6a4ab8b73863..671ebd357496c4e1608b240d2ba23c4d3d9efa42 100644 (file)
@@ -108,81 +108,41 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
  * syscall_get_arguments - extract system call parameter values
  * @task:      task of interest, must be blocked
  * @regs:      task_pt_regs() of @task
- * @i:         argument index [0,5]
- * @n:         number of arguments; n+i must be [1,6].
  * @args:      array filled with argument values
  *
- * Fetches @n arguments to the system call starting with the @i'th argument
- * (from 0 through 5).  Argument @i is stored in @args[0], and so on.
- * An arch inline version is probably optimal when @i and @n are constants.
+ * Fetches 6 arguments to the system call (from 0 through 5). The first
+ * argument is stored in @args[0], and so on.
  *
  * It's only valid to call this when @task is stopped for tracing on
  * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- * It's invalid to call this with @i + @n > 6; we only support system calls
- * taking up to 6 arguments.
  */
 #define SYSCALL_MAX_ARGS 6
 void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-                          unsigned int i, unsigned int n, unsigned long *args)
+                          unsigned long *args)
 {
-       if (n == 0)
-               return;
-       if (i + n > SYSCALL_MAX_ARGS) {
-               unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
-               unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
-               pr_warning("%s called with max args %d, handling only %d\n",
-                          __func__, i + n, SYSCALL_MAX_ARGS);
-               memset(args_bad, 0, n_bad * sizeof(args[0]));
-               memset(args_bad, 0, n_bad * sizeof(args[0]));
-       }
-
-       if (i == 0) {
-               args[0] = regs->orig_r0;
-               args++;
-               i++;
-               n--;
-       }
-
-       memcpy(args, &regs->uregs[0] + i, n * sizeof(args[0]));
+       args[0] = regs->orig_r0;
+       args++;
+       memcpy(args, &regs->uregs[0] + 1, 5 * sizeof(args[0]));
 }
 
 /**
  * syscall_set_arguments - change system call parameter value
  * @task:      task of interest, must be in system call entry tracing
  * @regs:      task_pt_regs() of @task
- * @i:         argument index [0,5]
- * @n:         number of arguments; n+i must be [1,6].
  * @args:      array of argument values to store
  *
- * Changes @n arguments to the system call starting with the @i'th argument.
- * Argument @i gets value @args[0], and so on.
- * An arch inline version is probably optimal when @i and @n are constants.
+ * Changes 6 arguments to the system call. The first argument gets value
+ * @args[0], and so on.
  *
  * It's only valid to call this when @task is stopped for tracing on
  * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- * It's invalid to call this with @i + @n > 6; we only support system calls
- * taking up to 6 arguments.
  */
 void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
-                          unsigned int i, unsigned int n,
                           const unsigned long *args)
 {
-       if (n == 0)
-               return;
-
-       if (i + n > SYSCALL_MAX_ARGS) {
-               pr_warn("%s called with max args %d, handling only %d\n",
-                       __func__, i + n, SYSCALL_MAX_ARGS);
-               n = SYSCALL_MAX_ARGS - i;
-       }
-
-       if (i == 0) {
-               regs->orig_r0 = args[0];
-               args++;
-               i++;
-               n--;
-       }
+       regs->orig_r0 = args[0];
+       args++;
 
-       memcpy(&regs->uregs[0] + i, args, n * sizeof(args[0]));
+       memcpy(&regs->uregs[0] + 1, args, 5 * sizeof(args[0]));
 }
 #endif /* _ASM_NDS32_SYSCALL_H */
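
With the index/count parameters gone, the contract is simply "fill a six-entry buffer". A sketch of a caller under the new API, assuming a ptrace-style context where task and regs are already in hand:

    unsigned long args[6];

    /* task must be stopped for tracing (TIF_SYSCALL_TRACE/AUDIT) */
    syscall_get_arguments(task, regs, args);
    pr_debug("arg0=%lx ... arg5=%lx\n", args[0], args[5]);
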
index b35ae5eae3ab3384cbfa7032e7a5345f351d1b36..d5ae571c8d303f4e87350e3a9644df4469ec4da2 100644 (file)
@@ -4,22 +4,6 @@
 #ifndef __ASMNDS32_TLB_H
 #define __ASMNDS32_TLB_H
 
-#define tlb_start_vma(tlb,vma)                                         \
-       do {                                                            \
-               if (!tlb->fullmm)                                       \
-                       flush_cache_range(vma, vma->vm_start, vma->vm_end); \
-       } while (0)
-
-#define tlb_end_vma(tlb,vma)                           \
-       do {                                            \
-               if(!tlb->fullmm)                        \
-                       flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
-       } while (0)
-
-#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0)
-
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
 #include <asm-generic/tlb.h>
 
 #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
index 9b411f401903630fff9f66498e09a6f1f1f7ce3e..38ee769b18d8ad344e09819dff0fb93c24af94bc 100644 (file)
@@ -42,6 +42,5 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
 
 void update_mmu_cache(struct vm_area_struct *vma,
                      unsigned long address, pte_t * pte);
-void tlb_migrate_finish(struct mm_struct *mm);
 
 #endif
index 4ef15a61b7bc33ee199a84fb6c8ef36be2a9deac..ea37394ff3eab2ad16cb5d7f1f08fe9626431080 100644 (file)
@@ -24,6 +24,7 @@ config NIOS2
        select USB_ARCH_HAS_HCD if USB_SUPPORT
        select CPU_NO_EFFICIENT_FFS
        select ARCH_DISCARD_MEMBLOCK
+       select MMU_GATHER_NO_RANGE if MMU
 
 config GENERIC_CSUM
        def_bool y
@@ -40,9 +41,6 @@ config NO_IOPORT_MAP
 config FPU
        def_bool n
 
-config RWSEM_GENERIC_SPINLOCK
-       def_bool y
-
 config TRACE_IRQFLAGS_SUPPORT
        def_bool n
 
index 8fde4fa2c34f758df132e659eb69b4331c84172f..88a667d12aaa9cefafad5260f03e073fefeb1fed 100644 (file)
@@ -23,6 +23,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
index 9de220854c4ad88f43ea579cbcf51c250cb6e688..d7624ed06efb6c9ea2e616c23cd20b030a53b1c8 100644 (file)
@@ -58,81 +58,25 @@ static inline void syscall_set_return_value(struct task_struct *task,
 }
 
 static inline void syscall_get_arguments(struct task_struct *task,
-       struct pt_regs *regs, unsigned int i, unsigned int n,
-       unsigned long *args)
+       struct pt_regs *regs, unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       switch (i) {
-       case 0:
-               if (!n--)
-                       break;
-               *args++ = regs->r4;
-       case 1:
-               if (!n--)
-                       break;
-               *args++ = regs->r5;
-       case 2:
-               if (!n--)
-                       break;
-               *args++ = regs->r6;
-       case 3:
-               if (!n--)
-                       break;
-               *args++ = regs->r7;
-       case 4:
-               if (!n--)
-                       break;
-               *args++ = regs->r8;
-       case 5:
-               if (!n--)
-                       break;
-               *args++ = regs->r9;
-       case 6:
-               if (!n--)
-                       break;
-       default:
-               BUG();
-       }
+       *args++ = regs->r4;
+       *args++ = regs->r5;
+       *args++ = regs->r6;
+       *args++ = regs->r7;
+       *args++ = regs->r8;
+       *args   = regs->r9;
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
-       struct pt_regs *regs, unsigned int i, unsigned int n,
-       const unsigned long *args)
+       struct pt_regs *regs, const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       switch (i) {
-       case 0:
-               if (!n--)
-                       break;
-               regs->r4 = *args++;
-       case 1:
-               if (!n--)
-                       break;
-               regs->r5 = *args++;
-       case 2:
-               if (!n--)
-                       break;
-               regs->r6 = *args++;
-       case 3:
-               if (!n--)
-                       break;
-               regs->r7 = *args++;
-       case 4:
-               if (!n--)
-                       break;
-               regs->r8 = *args++;
-       case 5:
-               if (!n--)
-                       break;
-               regs->r9 = *args++;
-       case 6:
-               if (!n)
-                       break;
-       default:
-               BUG();
-       }
+       regs->r4 = *args++;
+       regs->r5 = *args++;
+       regs->r6 = *args++;
+       regs->r7 = *args++;
+       regs->r8 = *args++;
+       regs->r9 = *args;
 }
 
 #endif
index d3bc648e08b5dad86e5e9c449e655fe291381c91..f9f2e27e32dd5e7768ba9a63faac98b18516f41d 100644 (file)
 #ifndef _ASM_NIOS2_TLB_H
 #define _ASM_NIOS2_TLB_H
 
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
 extern void set_mmu_pid(unsigned long pid);
 
 /*
- * NiosII doesn't need any special per-pte or per-vma handling, except
- * we need to flush cache for the area to be unmapped.
+ * NiosII does have flush_tlb_range(), but it lacks a limit and a fallback to
+ * full mm invalidation. So use flush_tlb_mm() for everything.
  */
-#define tlb_start_vma(tlb, vma)                                        \
-       do {                                                    \
-               if (!tlb->fullmm)                               \
-                       flush_cache_range(vma, vma->vm_start, vma->vm_end); \
-       }  while (0)
-
-#define tlb_end_vma(tlb, vma)  do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address)     do { } while (0)
 
 #include <linux/pagemap.h>
 #include <asm-generic/tlb.h>
index 755bb11323d8feb92d12349bf444f151cd8acc29..1c72f04ff75da1a7f6918f00b14116a183a79313 100644 (file)
@@ -1,2 +1 @@
-generic-y += kvm_para.h
 generic-y += ucontext.h
index a5e361fbb75a01400681b6238b21a7eea18b9607..7cfb20555b100508527be57172e99f0431fcb72f 100644 (file)
@@ -36,6 +36,7 @@ config OPENRISC
        select OMPIC if SMP
        select ARCH_WANT_FRAME_POINTERS
        select GENERIC_IRQ_MULTI_HANDLER
+       select MMU_GATHER_NO_RANGE if MMU
 
 config CPU_BIG_ENDIAN
        def_bool y
@@ -43,12 +44,6 @@ config CPU_BIG_ENDIAN
 config MMU
        def_bool y
 
-config RWSEM_GENERIC_SPINLOCK
-       def_bool y
-
-config RWSEM_XCHGADD_ALGORITHM
-       def_bool n
-
 config GENERIC_HWEIGHT
        def_bool y
 
index 5a73e2956ac46953ac9e1cf2404c3d0996617602..22aa97136c0195ae2b687c0793c42e43f22888ec 100644 (file)
@@ -20,6 +20,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
index 2db9f1cf0694c0f2c6bdaec77953f62fb4fe6372..b4ff07c1baed5d13c9abb0d025a1ece11ee78ad0 100644 (file)
@@ -56,20 +56,16 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
 
 static inline void
 syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-                     unsigned int i, unsigned int n, unsigned long *args)
+                     unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       memcpy(args, &regs->gpr[3 + i], n * sizeof(args[0]));
+       memcpy(args, &regs->gpr[3], 6 * sizeof(args[0]));
 }
 
 static inline void
 syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
-                     unsigned int i, unsigned int n, const unsigned long *args)
+                     const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-
-       memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
+       memcpy(&regs->gpr[3], args, 6 * sizeof(args[0]));
 }
 
 static inline int syscall_get_arch(void)
index fa4376a4515d14a8921ae7c3e382c0ee6398952e..92d8a42098849dcffcc5ceb07dc8c15b00f992c9 100644 (file)
 #define __ASM_OPENRISC_TLB_H__
 
 /*
- * or32 doesn't need any special per-pte or
- * per-vma handling..
+ * OpenRISC doesn't have an efficient flush_tlb_range() so use flush_tlb_mm()
+ * for everything.
  */
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
 #include <linux/pagemap.h>
 #include <asm-generic/tlb.h>
 
index 755bb11323d8feb92d12349bf444f151cd8acc29..1c72f04ff75da1a7f6918f00b14116a183a79313 100644 (file)
@@ -1,2 +1 @@
-generic-y += kvm_para.h
 generic-y += ucontext.h
index c8e621296092d83751ecdec774fb46aa430c6c04..f1ed8ddfe48697b0fa02810fbcb22763d1bd1302 100644 (file)
@@ -75,12 +75,6 @@ config GENERIC_LOCKBREAK
        default y
        depends on SMP && PREEMPT
 
-config RWSEM_GENERIC_SPINLOCK
-       def_bool y
-
-config RWSEM_XCHGADD_ALGORITHM
-       bool
-
 config ARCH_HAS_ILOG2_U32
        bool
        default n
index 6f49e77d82a2a48673356355e38a411794ff51bb..9bcd0c903dbbef2aee61ade11844c5091e4bfd0c 100644 (file)
@@ -11,6 +11,7 @@ generic-y += irq_regs.h
 generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
index 2a27b275ab092cc60b3d003250aaaf647aa9c916..9ff033d261ab381c9e356fea458d768170f9effc 100644 (file)
@@ -22,13 +22,14 @@ unsigned long profile_pc(struct pt_regs *);
 
 static inline unsigned long regs_return_value(struct pt_regs *regs)
 {
-       return regs->gr[20];
+       return regs->gr[28];
 }
 
 static inline void instruction_pointer_set(struct pt_regs *regs,
                                                unsigned long val)
 {
-        regs->iaoq[0] = val;
+       regs->iaoq[0] = val;
+       regs->iaoq[1] = val + 4;
 }
 
 /* Query offset/name of register from its name/offset */
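
The two-line body reflects the PA-RISC instruction-address queue: iaoq[0] is the instruction about to execute and iaoq[1] its successor, so moving only the front element would let one stale instruction from the old stream run. Illustrative use by a debugger-style caller (new_pc is hypothetical):

    /* Redirect a stopped task; both queue slots must be updated. */
    instruction_pointer_set(task_pt_regs(task), new_pc);
    /* afterwards: iaoq[0] == new_pc, iaoq[1] == new_pc + 4 */
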
index 8bff1a58c97f1b107dabf79e172f5ecb56c5db2d..62a6d477fae0197cdba9d62044e31104f1b05192 100644 (file)
@@ -18,29 +18,15 @@ static inline long syscall_get_nr(struct task_struct *tsk,
 }
 
 static inline void syscall_get_arguments(struct task_struct *tsk,
-                                        struct pt_regs *regs, unsigned int i,
-                                        unsigned int n, unsigned long *args)
+                                        struct pt_regs *regs,
+                                        unsigned long *args)
 {
-       BUG_ON(i);
-
-       switch (n) {
-       case 6:
-               args[5] = regs->gr[21];
-       case 5:
-               args[4] = regs->gr[22];
-       case 4:
-               args[3] = regs->gr[23];
-       case 3:
-               args[2] = regs->gr[24];
-       case 2:
-               args[1] = regs->gr[25];
-       case 1:
-               args[0] = regs->gr[26];
-       case 0:
-               break;
-       default:
-               BUG();
-       }
+       args[5] = regs->gr[21];
+       args[4] = regs->gr[22];
+       args[3] = regs->gr[23];
+       args[2] = regs->gr[24];
+       args[1] = regs->gr[25];
+       args[0] = regs->gr[26];
 }
 
 static inline long syscall_get_return_value(struct task_struct *task,
index 0c881e74d8a62cd6a4e6082178299a118b58a5d9..8c0446b04c9e17f593cac1d4fa3671658766038d 100644 (file)
@@ -2,24 +2,6 @@
 #ifndef _PARISC_TLB_H
 #define _PARISC_TLB_H
 
-#define tlb_flush(tlb)                 \
-do {   if ((tlb)->fullmm)              \
-               flush_tlb_mm((tlb)->mm);\
-} while (0)
-
-#define tlb_start_vma(tlb, vma) \
-do {   if (!(tlb)->fullmm)     \
-               flush_cache_range(vma, vma->vm_start, vma->vm_end); \
-} while (0)
-
-#define tlb_end_vma(tlb, vma)  \
-do {   if (!(tlb)->fullmm)     \
-               flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
-} while (0)
-
-#define __tlb_remove_tlb_entry(tlb, pte, address) \
-       do { } while (0)
-
 #include <asm-generic/tlb.h>
 
 #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
index 22fdbd08cdc8551777459efe75c49bbfa1e3f95a..2bd5b392277c2cf5c4a52f3d0b7d9aaed7382f44 100644 (file)
@@ -1,3 +1,2 @@
 generated-y += unistd_32.h
 generated-y += unistd_64.h
-generic-y += kvm_para.h
index eb39e7e380d7e27b24f6bae39ae0e6c3583511e3..841db71958cdb50dff183dd058a9b09a5ec81421 100644 (file)
@@ -210,12 +210,6 @@ void __cpuidle arch_cpu_idle(void)
 
 static int __init parisc_idle_init(void)
 {
-       const char *marker;
-
-       /* check QEMU/SeaBIOS marker in PAGE0 */
-       marker = (char *) &PAGE0->pad0;
-       running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
-
        if (!running_on_qemu)
                cpu_idle_poll_ctrl(1);
 
index 15dd9e21be7eac6d1fcf37d67f72de9b227bfa75..d908058d05c10bf4880e361070c30a42b668fd7e 100644 (file)
@@ -397,6 +397,9 @@ void __init start_parisc(void)
        int ret, cpunum;
        struct pdc_coproc_cfg coproc_cfg;
 
+       /* check QEMU/SeaBIOS marker in PAGE0 */
+       running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
+
        cpunum = smp_processor_id();
 
        init_cpu_topology();
index ec5835e83a7a756c9fecda603f57f42bec6ed870..6f0b9c8d80523682f85ab3f927ecbe3f9e96f188 100644 (file)
@@ -29,22 +29,17 @@ static void dump_trace(struct task_struct *task, struct stack_trace *trace)
        }
 }
 
-
 /*
  * Save stack-backtrace addresses into a stack_trace buffer.
  */
 void save_stack_trace(struct stack_trace *trace)
 {
        dump_trace(current, trace);
-       if (trace->nr_entries < trace->max_entries)
-               trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);
 
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
        dump_trace(tsk, trace);
-       if (trace->nr_entries < trace->max_entries)
-               trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
index b26766c6647dc7a40fd3235460902112c20cd3d4..fe8ca623add89a627710b697f7886fc879589ac2 100644 (file)
 421    32      rt_sigtimedwait_time64          sys_rt_sigtimedwait             compat_sys_rt_sigtimedwait_time64
 422    32      futex_time64                    sys_futex                       sys_futex
 423    32      sched_rr_get_interval_time64    sys_sched_rr_get_interval       sys_sched_rr_get_interval
+424    common  pidfd_send_signal               sys_pidfd_send_signal
+425    common  io_uring_setup                  sys_io_uring_setup
+426    common  io_uring_enter                  sys_io_uring_enter
+427    common  io_uring_register               sys_io_uring_register
index 2d0be82c30619bd405949b64c2847603caf5a67d..88a4fb3647a2eb80acc7a686b5f87dc94ac82300 100644 (file)
@@ -103,13 +103,6 @@ config LOCKDEP_SUPPORT
        bool
        default y
 
-config RWSEM_GENERIC_SPINLOCK
-       bool
-
-config RWSEM_XCHGADD_ALGORITHM
-       bool
-       default y
-
 config GENERIC_LOCKBREAK
        bool
        default y
@@ -218,6 +211,8 @@ config PPC
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
        select HAVE_RCU_TABLE_FREE              if SMP
+       select HAVE_RCU_TABLE_NO_INVALIDATE     if HAVE_RCU_TABLE_FREE
+       select HAVE_MMU_GATHER_PAGE_SIZE
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_RELIABLE_STACKTRACE         if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
        select HAVE_SYSCALL_TRACEPOINTS
@@ -318,6 +313,10 @@ config ARCH_SUSPEND_POSSIBLE
                   (PPC_85xx && !PPC_E500MC) || PPC_86xx || PPC_PSERIES \
                   || 44x || 40x
 
+config ARCH_SUSPEND_NONZERO_CPU
+       def_bool y
+       depends on PPC_POWERNV || PPC_PSERIES
+
 config PPC_DCR_NATIVE
        bool
 
index 5ba131c30f6bcded4e65ccc40bb8aa2595e44ff1..1bcd468ab422dc100b120607b03d5d587850b453 100644 (file)
@@ -266,6 +266,7 @@ CONFIG_UDF_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_HUGETLBFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS=y
index a0c132bedfae86965c2f7c850098b65420c2c5fc..36bda391e549f87dc477a9c0997e93b855e556b9 100644 (file)
@@ -8,6 +8,5 @@ generic-y += irq_regs.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += preempt.h
-generic-y += rwsem.h
 generic-y += vtime.h
 generic-y += msi.h
index d34ad1657d7b2c44cdb16683bfef359b651fd02d..8ddd4a91bdc1e2fe9a2e4a617b32cc0e6e15e572 100644 (file)
@@ -352,7 +352,7 @@ static inline bool strict_kernel_rwx_enabled(void)
 #if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) &&  \
        defined (CONFIG_PPC_64K_PAGES)
 #define MAX_PHYSMEM_BITS        51
-#else
+#elif defined(CONFIG_PPC64)
 #define MAX_PHYSMEM_BITS        46
 #endif
 
index c5698a523bb189dee5650398603c4ac8a0f5bc27..23f7ed796f38829a054b5c0851b04e581990bcbf 100644 (file)
 /* Misc instructions for BPF compiler */
 #define PPC_INST_LBZ                   0x88000000
 #define PPC_INST_LD                    0xe8000000
+#define PPC_INST_LDX                   0x7c00002a
 #define PPC_INST_LHZ                   0xa0000000
 #define PPC_INST_LWZ                   0x80000000
 #define PPC_INST_LHBRX                 0x7c00062c
 #define PPC_INST_STB                   0x98000000
 #define PPC_INST_STH                   0xb0000000
 #define PPC_INST_STD                   0xf8000000
+#define PPC_INST_STDX                  0x7c00012a
 #define PPC_INST_STDU                  0xf8000001
 #define PPC_INST_STW                   0x90000000
 #define PPC_INST_STWU                  0x94000000
index 1a0e7a8b1c811cf5d089c5ac68eb96d189ad702d..1243045bad2d633d4bd2df3d3e086bd2988987d0 100644 (file)
@@ -65,22 +65,20 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
        unsigned long val, mask = -1UL;
-
-       BUG_ON(i + n > 6);
+       unsigned int n = 6;
 
 #ifdef CONFIG_COMPAT
        if (test_tsk_thread_flag(task, TIF_32BIT))
                mask = 0xffffffff;
 #endif
        while (n--) {
-               if (n == 0 && i == 0)
+               if (n == 0)
                        val = regs->orig_gpr3;
                else
-                       val = regs->gpr[3 + i + n];
+                       val = regs->gpr[3 + n];
 
                args[n] = val & mask;
        }
@@ -88,15 +86,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
+       memcpy(&regs->gpr[3], args, 6 * sizeof(args[0]));
 
        /* Also copy the first argument into orig_gpr3 */
-       if (i == 0 && n > 0)
-               regs->orig_gpr3 = args[0];
+       regs->orig_gpr3 = args[0];
 }
 
 static inline int syscall_get_arch(void)
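
The mask in the loop above truncates each argument for 32-bit (compat) tasks, where only the low word of a GPR is meaningful. A standalone model of the effect, with hypothetical values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t gpr  = 0xdeadbeefcafef00dULL; /* register image */
            uint64_t mask = 0xffffffffULL;         /* TIF_32BIT case */

            /* a 32-bit task sees only the low word: 0xcafef00d */
            printf("arg = %#llx\n", (unsigned long long)(gpr & mask));
            return 0;
    }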
index e24c67d5ba75a2a18bc68d6cdfdb7399b07ba460..34fba1ce27f7c6f1ea34afb82f1cc7fcef750edd 100644 (file)
@@ -27,8 +27,8 @@
 #define tlb_start_vma(tlb, vma)        do { } while (0)
 #define tlb_end_vma(tlb, vma)  do { } while (0)
 #define __tlb_remove_tlb_entry __tlb_remove_tlb_entry
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
 
+#define tlb_flush tlb_flush
 extern void tlb_flush(struct mmu_gather *tlb);
 
 /* Get the generic bits... */
@@ -46,22 +46,6 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
 #endif
 }
 
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
-                                                    unsigned int page_size)
-{
-       if (!tlb->page_size)
-               tlb->page_size = page_size;
-       else if (tlb->page_size != page_size) {
-               if (!tlb->fullmm)
-                       tlb_flush_mmu(tlb);
-               /*
-                * update the page size after flush for the new
-                * mmu_gather.
-                */
-               tlb->page_size = page_size;
-       }
-}
-
 #ifdef CONFIG_SMP
 static inline int mm_is_core_local(struct mm_struct *mm)
 {
index 1afe90ade595e161016af3ca712b65f9990407f4..bbc06bd72b1f2497ef0a1120e631d4243f3f432a 100644 (file)
@@ -82,10 +82,10 @@ struct vdso_data {
        __u32 icache_block_size;                /* L1 i-cache block size     */
        __u32 dcache_log_block_size;            /* L1 d-cache log block size */
        __u32 icache_log_block_size;            /* L1 i-cache log block size */
-       __s32 wtom_clock_sec;                   /* Wall to monotonic clock */
-       __s32 wtom_clock_nsec;
-       struct timespec stamp_xtime;    /* xtime as at tb_orig_stamp */
-       __u32 stamp_sec_fraction;       /* fractional seconds of stamp_xtime */
+       __u32 stamp_sec_fraction;               /* fractional seconds of stamp_xtime */
+       __s32 wtom_clock_nsec;                  /* Wall to monotonic clock nsec */
+       __s64 wtom_clock_sec;                   /* Wall to monotonic clock sec */
+       struct timespec stamp_xtime;            /* xtime as at tb_orig_stamp */
        __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls  */
        __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
 };
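
Widening wtom_clock_sec to __s64 matters because the wall-to-monotonic offset is a large negative count of seconds; once CLOCK_REALTIME may be set to post-2038 values the offset no longer fits in 32 bits, and the reshuffled field order appears to keep the new 64-bit member naturally aligned. A userspace illustration of the truncation (value hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int64_t wtom = -2147483649LL;  /* one below INT32_MIN */

            /* the 32-bit cast wraps on typical implementations */
            printf("s64: %lld  s32: %d\n", (long long)wtom, (int32_t)wtom);
            return 0;
    }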
index 6f1c11e0691f2dd937f77861f08a0778f91e8a30..7534ecff5e925b434e4613298503d66435326205 100644 (file)
@@ -24,9 +24,6 @@ BEGIN_MMU_FTR_SECTION
        li      r10,0
        mtspr   SPRN_SPRG_603_LRU,r10           /* init SW LRU tracking */
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
-       lis     r10, (swapper_pg_dir - PAGE_OFFSET)@h
-       ori     r10, r10, (swapper_pg_dir - PAGE_OFFSET)@l
-       mtspr   SPRN_SPRG_PGDIR, r10
 
 BEGIN_FTR_SECTION
        bl      __init_fpu_registers
index a5b8fbae56a03b491f0982562f3d590cff16ca5f..9481a117e24255173231ac687c9e99b730bff420 100644 (file)
@@ -656,11 +656,17 @@ EXC_COMMON_BEGIN(data_access_slb_common)
        ld      r4,PACA_EXSLB+EX_DAR(r13)
        std     r4,_DAR(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
+BEGIN_MMU_FTR_SECTION
+       /* HPT case, do SLB fault */
        bl      do_slb_fault
        cmpdi   r3,0
        bne-    1f
        b       fast_exception_return
 1:     /* Error case */
+MMU_FTR_SECTION_ELSE
+       /* Radix case, access is outside page table range */
+       li      r3,-EFAULT
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
        std     r3,RESULT(r1)
        bl      save_nvgprs
        RECONCILE_IRQ_STATE(r10, r11)
@@ -705,11 +711,17 @@ EXC_COMMON_BEGIN(instruction_access_slb_common)
        EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB)
        ld      r4,_NIP(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
+BEGIN_MMU_FTR_SECTION
+       /* HPT case, do SLB fault */
        bl      do_slb_fault
        cmpdi   r3,0
        bne-    1f
        b       fast_exception_return
 1:     /* Error case */
+MMU_FTR_SECTION_ELSE
+       /* Radix case, access is outside page table range */
+       li      r3,-EFAULT
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
        std     r3,RESULT(r1)
        bl      save_nvgprs
        RECONCILE_IRQ_STATE(r10, r11)
index ce6a972f25849ea87774be809da0faa3d84d96f3..e25b615e9f9e642d34e9387aac7db652131a466f 100644 (file)
@@ -851,10 +851,9 @@ __secondary_start:
        tophys(r4,r2)
        addi    r4,r4,THREAD    /* phys address of our thread_struct */
        mtspr   SPRN_SPRG_THREAD,r4
-#ifdef CONFIG_PPC_RTAS
-       li      r3,0
-       stw     r3, RTAS_SP(r4)         /* 0 => not in RTAS */
-#endif
+       lis     r4, (swapper_pg_dir - PAGE_OFFSET)@h
+       ori     r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
+       mtspr   SPRN_SPRG_PGDIR, r4
 
        /* enable MMU and jump to start_secondary */
        li      r4,MSR_KERNEL
@@ -938,10 +937,9 @@ start_here:
        tophys(r4,r2)
        addi    r4,r4,THREAD    /* init task's THREAD */
        mtspr   SPRN_SPRG_THREAD,r4
-#ifdef CONFIG_PPC_RTAS
-       li      r3,0
-       stw     r3, RTAS_SP(r4)         /* 0 => not in RTAS */
-#endif
+       lis     r4, (swapper_pg_dir - PAGE_OFFSET)@h
+       ori     r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
+       mtspr   SPRN_SPRG_PGDIR, r4
 
        /* stack */
        lis     r1,init_thread_union@ha
index 683b5b3805bd17493d97c261afc19279ac76b69f..cd381e2291dfeb38a569fed214778838cef42a2e 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/kvm_host.h>
 #include <linux/init.h>
 #include <linux/export.h>
+#include <linux/kmemleak.h>
 #include <linux/kvm_para.h>
 #include <linux/slab.h>
 #include <linux/of.h>
@@ -712,6 +713,12 @@ static void kvm_use_magic_page(void)
 
 static __init void kvm_free_tmp(void)
 {
+       /*
+        * Inform kmemleak about the hole in the .bss section since the
+        * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
+        */
+       kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
+                          ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
        free_reserved_area(&kvm_tmp[kvm_tmp_index],
                           &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
 }
index 9b8631533e02a4559a4dfc4c23240db58e39192c..70568ccbd9fd5eae17014473aa415d9b472b7d86 100644 (file)
@@ -57,7 +57,7 @@ void setup_barrier_nospec(void)
        enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
                 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);
 
-       if (!no_nospec)
+       if (!no_nospec && !cpu_mitigations_off())
                enable_barrier_nospec(enable);
 }
 
@@ -116,7 +116,7 @@ static int __init handle_nospectre_v2(char *p)
 early_param("nospectre_v2", handle_nospectre_v2);
 void setup_spectre_v2(void)
 {
-       if (no_spectrev2)
+       if (no_spectrev2 || cpu_mitigations_off())
                do_btb_flush_fixups();
        else
                btb_flush_enabled = true;
@@ -190,29 +190,22 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
        bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
        ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
 
-       if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
-               bool comma = false;
+       if (bcs || ccd) {
                seq_buf_printf(&s, "Mitigation: ");
 
-               if (bcs) {
+               if (bcs)
                        seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
-                       comma = true;
-               }
 
-               if (ccd) {
-                       if (comma)
-                               seq_buf_printf(&s, ", ");
-                       seq_buf_printf(&s, "Indirect branch cache disabled");
-                       comma = true;
-               }
-
-               if (comma)
+               if (bcs && ccd)
                        seq_buf_printf(&s, ", ");
 
-               seq_buf_printf(&s, "Software count cache flush");
+               if (ccd)
+                       seq_buf_printf(&s, "Indirect branch cache disabled");
+       } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
+               seq_buf_printf(&s, "Mitigation: Software count cache flush");
 
                if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
-                       seq_buf_printf(&s, "(hardware accelerated)");
+                       seq_buf_printf(&s, " (hardware accelerated)");
        } else if (btb_flush_enabled) {
                seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
        } else {
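
The rework above makes the sysfs string an either/or: the serialisation/cache-disable mitigations and the software count-cache flush are now mutually exclusive in what gets printed. Illustrative outputs of the new code paths (not captured from hardware):

    Mitigation: Indirect branch serialisation (kernel only)
    Mitigation: Indirect branch serialisation (kernel only), Indirect branch cache disabled
    Mitigation: Software count cache flush
    Mitigation: Software count cache flush (hardware accelerated)
    Mitigation: Branch predictor state flush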
@@ -307,7 +300,7 @@ void setup_stf_barrier(void)
 
        stf_enabled_flush_types = type;
 
-       if (!no_stf_barrier)
+       if (!no_stf_barrier && !cpu_mitigations_off())
                stf_barrier_enable(enable);
 }
 
index ba404dd9ce1d88809e0a6e70f0decc286caf576a..4f49e1a3594c2d3423ae232152cda2b53e730483 100644 (file)
@@ -932,7 +932,7 @@ void setup_rfi_flush(enum l1d_flush_type types, bool enable)
 
        enabled_flush_types = types;
 
-       if (!no_rfi_flush)
+       if (!no_rfi_flush && !cpu_mitigations_off())
                rfi_flush_enable(enable);
 }
 
index b18abb0c3dae6248cfd697b1b9ee2343c39e1ba3..00f5a63c8d9a65aefd60df95b75d9cfae1fe8493 100644 (file)
 421    32      rt_sigtimedwait_time64          sys_rt_sigtimedwait             compat_sys_rt_sigtimedwait_time64
 422    32      futex_time64                    sys_futex                       sys_futex
 423    32      sched_rr_get_interval_time64    sys_sched_rr_get_interval       sys_sched_rr_get_interval
+424    common  pidfd_send_signal               sys_pidfd_send_signal
+425    common  io_uring_setup                  sys_io_uring_setup
+426    common  io_uring_enter                  sys_io_uring_enter
+427    common  io_uring_register               sys_io_uring_register
index 1e0bc5955a400601b106949f14c7a0ca64d1a6a6..afd516b572f8637447315ec882c08189bcf2fb4d 100644 (file)
@@ -98,7 +98,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
         * can be used, r7 contains NSEC_PER_SEC.
         */
 
-       lwz     r5,WTOM_CLOCK_SEC(r9)
+       lwz     r5,(WTOM_CLOCK_SEC+LOPART)(r9)
        lwz     r6,WTOM_CLOCK_NSEC(r9)
 
        /* We now have our offset in r5,r6. We create a fake dependency
index a4ed9edfd5f0b694288478683858f0ed3f306516..1f324c28705bc799b48172c232d6f193e6520eae 100644 (file)
@@ -92,7 +92,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
         * At this point, r4,r5 contain our sec/nsec values.
         */
 
-       lwa     r6,WTOM_CLOCK_SEC(r3)
+       ld      r6,WTOM_CLOCK_SEC(r3)
        lwa     r9,WTOM_CLOCK_NSEC(r3)
 
        /* We now have our result in r6,r9. We create a fake dependency
@@ -125,7 +125,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
        bne     cr6,75f
 
        /* CLOCK_MONOTONIC_COARSE */
-       lwa     r6,WTOM_CLOCK_SEC(r3)
+       ld      r6,WTOM_CLOCK_SEC(r3)
        lwa     r9,WTOM_CLOCK_NSEC(r3)
 
        /* check if counter has updated */
index f02b049737109c670b1af440f9f5704bbdf0afc0..f100e331e69b6ad37f5f6323219ada40fe8c8641 100644 (file)
@@ -543,14 +543,14 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
        if (ret != H_SUCCESS)
                return ret;
 
+       idx = srcu_read_lock(&vcpu->kvm->srcu);
+
        ret = kvmppc_tce_validate(stt, tce);
        if (ret != H_SUCCESS)
-               return ret;
+               goto unlock_exit;
 
        dir = iommu_tce_direction(tce);
 
-       idx = srcu_read_lock(&vcpu->kvm->srcu);
-
        if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
                ret = H_PARAMETER;
                goto unlock_exit;
index 06964350b97a94118d065d90a257c882b5280136..b2b29d4f9842877db15addda76a19eb061f7c858 100644 (file)
@@ -3423,7 +3423,9 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
        vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
        vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
 
-       mtspr(SPRN_PSSCR, host_psscr);
+       /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
+       mtspr(SPRN_PSSCR, host_psscr |
+             (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
        mtspr(SPRN_HFSCR, host_hfscr);
        mtspr(SPRN_CIABR, host_ciabr);
        mtspr(SPRN_DAWR, host_dawr);
index 844d8e774492e65929168bfff4d0655fa50dda74..b7f6f6e0b6e801c6cf0fbb1d11d5c0d53014fb4d 100644 (file)
@@ -215,11 +215,20 @@ _GLOBAL_TOC(memcmp)
        beq     .Lzero
 
 .Lcmp_rest_lt8bytes:
-       /* Here we have only less than 8 bytes to compare with. at least s1
-        * Address is aligned with 8 bytes.
-        * The next double words are load and shift right with appropriate
-        * bits.
+       /*
+        * Here we have less than 8 bytes to compare. At least s1 is aligned to
+        * 8 bytes, but s2 may not be. We must make sure s2 + 7 doesn't cross a
+        * page boundary, otherwise we might read past the end of the buffer and
+        * trigger a page fault. We use 4K as the conservative minimum page
+        * size. If we detect that case we go to the byte-by-byte loop.
+        *
+        * Otherwise the next double word is loaded from s1 and s2, and shifted
+        * right to compare the appropriate bits.
         */
+       clrldi  r6,r4,(64-12)   // r6 = r4 & 0xfff
+       cmpdi   r6,0xff8
+       bgt     .Lshort
+
        subfic  r6,r5,8
        slwi    r6,r6,3
        LD      rA,0,r3
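
The three new instructions implement the guard described in the comment: with 4K taken as the smallest possible page, an 8-byte load at s2 is safe only when (s2 & 0xfff) <= 0xff8, i.e. bytes s2..s2+7 stay inside one page. The same test as a C model:

    #include <stdbool.h>
    #include <stdint.h>

    /* true if an 8-byte load at p cannot cross a 4K page boundary */
    static bool wide_load_is_safe(uintptr_t p)
    {
            return (p & 0xfff) <= 0xff8;   /* p..p+7 in one page */
    }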
index 1f13494efb2bfa9b50996ce6fda0711b9e933008..a6c491f18a04e2cfee3e61fbb6a0e37f955f5f16 100644 (file)
@@ -70,12 +70,12 @@ _GLOBAL(hash_page)
        lis     r0,KERNELBASE@h         /* check if kernel address */
        cmplw   0,r4,r0
        ori     r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
-       mfspr   r5, SPRN_SPRG_PGDIR     /* virt page-table root */
+       mfspr   r5, SPRN_SPRG_PGDIR     /* phys page-table root */
        blt+    112f                    /* assume user more likely */
-       lis     r5,swapper_pg_dir@ha    /* if kernel address, use */
-       addi    r5,r5,swapper_pg_dir@l  /* kernel page table */
+       lis     r5, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
+       addi    r5, r5, (swapper_pg_dir - PAGE_OFFSET)@l        /* kernel page table */
        rlwimi  r3,r9,32-12,29,29       /* MSR_PR -> _PAGE_USER */
-112:   tophys(r5, r5)
+112:
 #ifndef CONFIG_PTE_64BIT
        rlwimi  r5,r4,12,20,29          /* insert top 10 bits of address */
        lwz     r8,0(r5)                /* get pmd entry */
index e7a9c4f6bfca49585beffcb6fc3dc755eb054e8f..8330f135294f48ecfff9bb5d3555f6fa3e3514c3 100644 (file)
@@ -95,28 +95,15 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
                              unsigned long entries, unsigned long dev_hpa,
                              struct mm_iommu_table_group_mem_t **pmem)
 {
-       struct mm_iommu_table_group_mem_t *mem;
-       long i, ret, locked_entries = 0;
+       struct mm_iommu_table_group_mem_t *mem, *mem2;
+       long i, ret, locked_entries = 0, pinned = 0;
        unsigned int pageshift;
-
-       mutex_lock(&mem_list_mutex);
-
-       list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
-                       next) {
-               /* Overlap? */
-               if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
-                               (ua < (mem->ua +
-                                      (mem->entries << PAGE_SHIFT)))) {
-                       ret = -EINVAL;
-                       goto unlock_exit;
-               }
-
-       }
+       unsigned long entry, chunk;
 
        if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
                ret = mm_iommu_adjust_locked_vm(mm, entries, true);
                if (ret)
-                       goto unlock_exit;
+                       return ret;
 
                locked_entries = entries;
        }
@@ -148,17 +135,27 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
        }
 
        down_read(&mm->mmap_sem);
-       ret = get_user_pages_longterm(ua, entries, FOLL_WRITE, mem->hpages, NULL);
+       chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
+                       sizeof(struct vm_area_struct *);
+       chunk = min(chunk, entries);
+       for (entry = 0; entry < entries; entry += chunk) {
+               unsigned long n = min(entries - entry, chunk);
+
+               ret = get_user_pages_longterm(ua + (entry << PAGE_SHIFT), n,
+                               FOLL_WRITE, mem->hpages + entry, NULL);
+               if (ret == n) {
+                       pinned += n;
+                       continue;
+               }
+               if (ret > 0)
+                       pinned += ret;
+               break;
+       }
        up_read(&mm->mmap_sem);
-       if (ret != entries) {
-               /* free the reference taken */
-               for (i = 0; i < ret; i++)
-                       put_page(mem->hpages[i]);
-
-               vfree(mem->hpas);
-               kfree(mem);
-               ret = -EFAULT;
-               goto unlock_exit;
+       if (pinned != entries) {
+               if (!ret)
+                       ret = -EFAULT;
+               goto free_exit;
        }
 
        pageshift = PAGE_SHIFT;
@@ -183,21 +180,43 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
        }
 
 good_exit:
-       ret = 0;
        atomic64_set(&mem->mapped, 1);
        mem->used = 1;
        mem->ua = ua;
        mem->entries = entries;
-       *pmem = mem;
 
-       list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
+       mutex_lock(&mem_list_mutex);
 
-unlock_exit:
-       if (locked_entries && ret)
-               mm_iommu_adjust_locked_vm(mm, locked_entries, false);
+       list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next) {
+               /* Overlap? */
+               if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) &&
+                               (ua < (mem2->ua +
+                                      (mem2->entries << PAGE_SHIFT)))) {
+                       ret = -EINVAL;
+                       mutex_unlock(&mem_list_mutex);
+                       goto free_exit;
+               }
+       }
+
+       list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
 
        mutex_unlock(&mem_list_mutex);
 
+       *pmem = mem;
+
+       return 0;
+
+free_exit:
+       /* free the reference taken */
+       for (i = 0; i < pinned; i++)
+               put_page(mem->hpages[i]);
+
+       vfree(mem->hpas);
+       kfree(mem);
+
+unlock_exit:
+       mm_iommu_adjust_locked_vm(mm, locked_entries, false);
+
        return ret;
 }
 
@@ -266,7 +285,7 @@ static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
 long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 {
        long ret = 0;
-       unsigned long entries, dev_hpa;
+       unsigned long unlock_entries = 0;
 
        mutex_lock(&mem_list_mutex);
 
@@ -287,17 +306,17 @@ long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
                goto unlock_exit;
        }
 
+       if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
+               unlock_entries = mem->entries;
+
        /* @mapped became 0 so now mappings are disabled, release the region */
-       entries = mem->entries;
-       dev_hpa = mem->dev_hpa;
        mm_iommu_release(mem);
 
-       if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
-               mm_iommu_adjust_locked_vm(mm, entries, false);
-
 unlock_exit:
        mutex_unlock(&mem_list_mutex);
 
+       mm_iommu_adjust_locked_vm(mm, unlock_entries, false);
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(mm_iommu_put);
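
The allocation-path change above bounds each get_user_pages_longterm() call so the slice of the page-pointer array it fills fits in one MAX_ORDER allocation, accumulating the running total in pinned so a short pin can be unwound exactly. Reduced to its shape (pin_range() is a hypothetical stand-in for get_user_pages_longterm()):

    static long pin_chunked(unsigned long ua, unsigned long entries,
                            struct page **pages, unsigned long chunk)
    {
            long ret = 0, pinned = 0;
            unsigned long entry, n;

            for (entry = 0; entry < entries; entry += chunk) {
                    n = entries - entry < chunk ? entries - entry : chunk;
                    ret = pin_range(ua + (entry << PAGE_SHIFT), n,
                                    pages + entry);
                    if (ret > 0)
                            pinned += ret;
                    if (ret != (long)n)    /* short or failed pin */
                            break;
            }
            return pinned;  /* caller releases pages if pinned != entries */
    }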
index f29d2f118b444aa6b060bcfa6fab6fb0bf321949..5d9c3ff728c9a96cfffd232466096473e091be65 100644 (file)
@@ -98,10 +98,20 @@ static int find_free_bat(void)
        return -1;
 }
 
+/*
+ * This function calculates the size of the largest block usable to map the
+ * beginning of an area based on the start address and size of that area:
+ * - max block size is 8M on 601 and 256M on other 6xx.
+ * - base address must be aligned to the block size. So the maximum block size
+ *   is identified by the lowest bit set to 1 in the base address (for instance
+ *   if base is 0x16000000, max size is 0x02000000).
+ * - block size has to be a power of two. This is calculated by finding the
+ *   highest bit set to 1.
+ */
 static unsigned int block_size(unsigned long base, unsigned long top)
 {
        unsigned int max_size = (cpu_has_feature(CPU_FTR_601) ? 8 : 256) << 20;
-       unsigned int base_shift = (fls(base) - 1) & 31;
+       unsigned int base_shift = (ffs(base) - 1) & 31;
        unsigned int block_shift = (fls(top - base) - 1) & 31;
 
        return min3(max_size, 1U << base_shift, 1U << block_shift);
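
A standalone model of block_size() as documented above, using compiler builtins in place of the kernel's ffs()/fls() (assumes 32-bit addresses and the non-601 maximum):

    #include <stdio.h>

    static unsigned int ffs32(unsigned int x)  /* lowest set bit, 1-based */
    {
            return x ? (unsigned int)__builtin_ctz(x) + 1 : 0;
    }

    static unsigned int fls32(unsigned int x)  /* highest set bit, 1-based */
    {
            return x ? 32 - (unsigned int)__builtin_clz(x) : 0;
    }

    static unsigned int block_size(unsigned int base, unsigned int top)
    {
            unsigned int max_size = 256U << 20;                 /* non-601 */
            unsigned int align = 1U << ((ffs32(base) - 1) & 31);
            unsigned int len   = 1U << ((fls32(top - base) - 1) & 31);
            unsigned int m = align < len ? align : len;

            return m < max_size ? m : max_size;
    }

    int main(void)
    {
            /* example from the comment: base 0x16000000 -> 0x02000000 */
            printf("%#x\n", block_size(0x16000000, 0x18000000));
            return 0;
    }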
@@ -157,7 +167,7 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to
 
 unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 {
-       int done;
+       unsigned long done;
        unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
 
        if (__map_without_bats) {
@@ -169,10 +179,10 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
                return __mmu_mapin_ram(base, top);
 
        done = __mmu_mapin_ram(base, border);
-       if (done != border - base)
+       if (done != border)
                return done;
 
-       return done + __mmu_mapin_ram(border, top);
+       return __mmu_mapin_ram(border, top);
 }
 
 void mmu_mark_initmem_nx(void)
index 549e9490ff2aabd79e7e8a7acc970d8a599cba79..dcac37745b05cfcc70b89fafcda172e3406956a0 100644 (file)
@@ -51,6 +51,8 @@
 #define PPC_LIS(r, i)          PPC_ADDIS(r, 0, i)
 #define PPC_STD(r, base, i)    EMIT(PPC_INST_STD | ___PPC_RS(r) |            \
                                     ___PPC_RA(base) | ((i) & 0xfffc))
+#define PPC_STDX(r, base, b)   EMIT(PPC_INST_STDX | ___PPC_RS(r) |           \
+                                    ___PPC_RA(base) | ___PPC_RB(b))
 #define PPC_STDU(r, base, i)   EMIT(PPC_INST_STDU | ___PPC_RS(r) |           \
                                     ___PPC_RA(base) | ((i) & 0xfffc))
 #define PPC_STW(r, base, i)    EMIT(PPC_INST_STW | ___PPC_RS(r) |            \
@@ -65,7 +67,9 @@
 #define PPC_LBZ(r, base, i)    EMIT(PPC_INST_LBZ | ___PPC_RT(r) |            \
                                     ___PPC_RA(base) | IMM_L(i))
 #define PPC_LD(r, base, i)     EMIT(PPC_INST_LD | ___PPC_RT(r) |             \
-                                    ___PPC_RA(base) | IMM_L(i))
+                                    ___PPC_RA(base) | ((i) & 0xfffc))
+#define PPC_LDX(r, base, b)    EMIT(PPC_INST_LDX | ___PPC_RT(r) |            \
+                                    ___PPC_RA(base) | ___PPC_RB(b))
 #define PPC_LWZ(r, base, i)    EMIT(PPC_INST_LWZ | ___PPC_RT(r) |            \
                                     ___PPC_RA(base) | IMM_L(i))
 #define PPC_LHZ(r, base, i)    EMIT(PPC_INST_LHZ | ___PPC_RT(r) |            \
                                        ___PPC_RA(a) | ___PPC_RB(b))
 #define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) |          \
                                        ___PPC_RA(a) | ___PPC_RB(b))
-
-#ifdef CONFIG_PPC64
-#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0)
-#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0)
-#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
-#else
-#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
-#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
-#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
-#endif
-
 #define PPC_CMPWI(a, i)                EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
 #define PPC_CMPDI(a, i)                EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
 #define PPC_CMPW(a, b)         EMIT(PPC_INST_CMPW | ___PPC_RA(a) |           \
index dc50a8d4b3b972a479aa2b00b1ea1c46db2977e2..21744d8aa053118f138f4a98d4097da2b2262fa6 100644 (file)
@@ -122,6 +122,10 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
 #define PPC_NTOHS_OFFS(r, base, i)     PPC_LHZ_OFFS(r, base, i)
 #endif
 
+#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
+#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
+#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
+
 #define SEEN_DATAREF 0x10000 /* might call external helpers */
 #define SEEN_XREG    0x20000 /* X reg is used */
 #define SEEN_MEM     0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
index 3609be4692b35e948f3ceff98c39c1b3bdaea744..47f441f351a6211c854ab3e7569a8ffa9e3dd943 100644 (file)
@@ -68,6 +68,26 @@ static const int b2p[] = {
 /* PPC NVR range -- update this if we ever use NVRs below r27 */
 #define BPF_PPC_NVR_MIN                27
 
+/*
+ * WARNING: These can use TMP_REG_2 if the offset is not at a word boundary,
+ * so ensure that it isn't in use already.
+ */
+#define PPC_BPF_LL(r, base, i) do {                                          \
+                               if ((i) % 4) {                                \
+                                       PPC_LI(b2p[TMP_REG_2], (i));          \
+                                       PPC_LDX(r, base, b2p[TMP_REG_2]);     \
+                               } else                                        \
+                                       PPC_LD(r, base, i);                   \
+                               } while(0)
+#define PPC_BPF_STL(r, base, i) do {                                         \
+                               if ((i) % 4) {                                \
+                                       PPC_LI(b2p[TMP_REG_2], (i));          \
+                                       PPC_STDX(r, base, b2p[TMP_REG_2]);    \
+                               } else                                        \
+                                       PPC_STD(r, base, i);                  \
+                               } while(0)
+#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
+
 #define SEEN_FUNC      0x1000 /* might call external helpers */
 #define SEEN_STACK     0x2000 /* uses BPF stack */
 #define SEEN_TAILCALL  0x4000 /* uses tail calls */
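
The word-boundary test in PPC_BPF_LL/PPC_BPF_STL exists because ppc64 LD/STD are DS-form instructions: the low two bits of the 16-bit displacement field belong to the opcode, so only offsets that are multiples of 4 (and fit in 16 signed bits) encode directly; anything else detours through indexed LDX/STDX with the offset in TMP_REG_2. The encodability condition, modelled in C:

    #include <stdbool.h>

    /* DS-form displacement: 16-bit signed, low two bits must be zero */
    static bool ds_form_ok(int off)
    {
            return (off & 3) == 0 && off >= -32768 && off <= 32767;
    }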
index 4194d3cfb60cd0702487a83174f85b29806520ec..21a1dcd4b156c4bc926eccd5bd2375383a7705d2 100644 (file)
@@ -252,7 +252,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
         * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
         *   goto out;
         */
-       PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
+       PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
        PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
        PPC_BCC(COND_GT, out);
 
@@ -265,7 +265,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
        /* prog = array->ptrs[index]; */
        PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
        PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
-       PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
+       PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
 
        /*
         * if (prog == NULL)
@@ -275,7 +275,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
        PPC_BCC(COND_EQ, out);
 
        /* goto *(prog->bpf_func + prologue_size); */
-       PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
+       PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
 #ifdef PPC64_ELF_ABI_v1
        /* skip past the function descriptor */
        PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
@@ -606,7 +606,7 @@ bpf_alu32_trunc:
                                 * the instructions generated will remain the
                                 * same across all passes
                                 */
-                               PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
+                               PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
                                PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
                                PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
                                break;
@@ -662,7 +662,7 @@ emit_clear:
                                PPC_LI32(b2p[TMP_REG_1], imm);
                                src_reg = b2p[TMP_REG_1];
                        }
-                       PPC_STD(src_reg, dst_reg, off);
+                       PPC_BPF_STL(src_reg, dst_reg, off);
                        break;
 
                /*
@@ -709,7 +709,7 @@ emit_clear:
                        break;
                /* dst = *(u64 *)(ul) (src + off) */
                case BPF_LDX | BPF_MEM | BPF_DW:
-                       PPC_LD(dst_reg, src_reg, off);
+                       PPC_BPF_LL(dst_reg, src_reg, off);
                        break;
 
                /*
index 842b2c7e156aba4cb2a04d8897fb7aa6128c3b4d..50cd09b4e05d51a9d9c46722065f9ebf5e55295c 100644 (file)
@@ -324,7 +324,7 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
 
 config PPC_RADIX_MMU
        bool "Radix MMU Support"
-       depends on PPC_BOOK3S_64
+       depends on PPC_BOOK3S_64 && HUGETLB_PAGE
        select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
        default y
        help
index 6ed22127391b6d0a7789bb363476452bf0991a65..921f12182f3e01a850372fd51b0a88a5bede296c 100644 (file)
@@ -77,18 +77,27 @@ static u32 cpu_to_drc_index(int cpu)
 
                ret = drc.drc_index_start + (thread_index * drc.sequential_inc);
        } else {
-               const __be32 *indexes;
-
-               indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
-               if (indexes == NULL)
-                       goto err_of_node_put;
+               u32 nr_drc_indexes, thread_drc_index;
 
                /*
-                * The first element indexes[0] is the number of drc_indexes
-                * returned in the list.  Hence thread_index+1 will get the
-                * drc_index corresponding to core number thread_index.
+                * The first element of ibm,drc-indexes array is the
+                * number of drc_indexes returned in the list.  Hence
+                * thread_index+1 will get the drc_index corresponding
+                * to core number thread_index.
                 */
-               ret = indexes[thread_index + 1];
+               rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
+                                               0, &nr_drc_indexes);
+               if (rc)
+                       goto err_of_node_put;
+
+               WARN_ON_ONCE(thread_index > nr_drc_indexes);
+               rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
+                                               thread_index + 1,
+                                               &thread_drc_index);
+               if (rc)
+                       goto err_of_node_put;
+
+               ret = thread_drc_index;
        }
 
        rc = 0;
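
of_property_read_u32_index() bounds-checks each cell and converts it from big-endian, unlike the raw of_get_property() pointer it replaces (the old indexes[thread_index + 1] read appears to have returned an unconverted __be32). The property layout assumed by the lookup, sketched with hypothetical values:

    /* ibm,drc-indexes = <4  0x1000 0x1001 0x1002 0x1003>;
     *                    ^count  ^ index for core 0, then core 1, ...
     * so the drc_index for core i lives at cell i + 1. */
    u32 nr, idx;

    if (of_property_read_u32_index(dn, "ibm,drc-indexes", 0, &nr))
            return -ENOENT;             /* property missing or short */
    if (of_property_read_u32_index(dn, "ibm,drc-indexes", i + 1, &idx))
            return -ENOENT;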
index d97d52772789b70187c5a2336c6fdee76fc48154..452dcfd7e5dd15083715eceef980642fdfb83efd 100644 (file)
@@ -550,6 +550,7 @@ static void pseries_print_mce_info(struct pt_regs *regs,
                "UE",
                "SLB",
                "ERAT",
+               "Unknown",
                "TLB",
                "D-Cache",
                "Unknown",
index eb56c82d8aa14caf0e6f5507102dd22fb208b9ac..0582260fb6c20f31a68075c217382d30b4a0852c 100644 (file)
@@ -69,9 +69,6 @@ config STACKTRACE_SUPPORT
 config TRACE_IRQFLAGS_SUPPORT
        def_bool y
 
-config RWSEM_GENERIC_SPINLOCK
-       def_bool y
-
 config GENERIC_BUG
        def_bool y
        depends on BUG
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
new file mode 100644 (file)
index 0000000..1a911ed
--- /dev/null
@@ -0,0 +1,84 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_ARCH_RV32I=y
+CONFIG_SMP=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NETLINK_DIAG=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCIE_XILINX=y
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
+CONFIG_MACB=y
+CONFIG_E1000E=y
+CONFIG_R8169=y
+CONFIG_MICROSEMI_PHY=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
+CONFIG_HVC_RISCV_SBI=y
+# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_SIFIVE_PLIC=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_RCU_TRACE is not set
index 57afe604b495bef44894b5088517c103376684d4..c207f6634b91c4ecc8f60b759c82056dd5624ed4 100644 (file)
@@ -26,7 +26,7 @@ enum fixed_addresses {
 };
 
 #define FIXADDR_SIZE           (__end_of_fixed_addresses * PAGE_SIZE)
-#define FIXADDR_TOP            (PAGE_OFFSET)
+#define FIXADDR_TOP            (VMALLOC_START)
 #define FIXADDR_START          (FIXADDR_TOP - FIXADDR_SIZE)
 
 #define FIXMAP_PAGE_IO         PAGE_KERNEL
index bba3da6ef1572f41db64e59ca203ae32b9139180..a3d5273ded7c6d0782356f01abf7d5cdca753bc5 100644 (file)
@@ -72,32 +72,20 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       if (i == 0) {
-               args[0] = regs->orig_a0;
-               args++;
-               i++;
-               n--;
-       }
-       memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
+       args[0] = regs->orig_a0;
+       args++;
+       memcpy(args, &regs->a1, 5 * sizeof(args[0]));
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-        if (i == 0) {
-                regs->orig_a0 = args[0];
-                args++;
-                i++;
-                n--;
-        }
-       memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
+       regs->orig_a0 = args[0];
+       args++;
+       memcpy(&regs->a1, args, 5 * sizeof(regs->a1));
 }
 
 static inline int syscall_get_arch(void)
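
A self-contained model of the simplified convention above (mocked pt_regs, not kernel code): callers now always pass a six-element array and all arguments are copied. The rewrite also retires a latent bug in the removed variant, whose `&regs->a1 + i * sizeof(regs->a1)` scaled the offset twice, since pointer arithmetic on an `unsigned long *` is already element-sized.

    #include <stdio.h>
    #include <string.h>

    /* Mock of the relevant pt_regs slice (names mirror the real struct):
     * orig_a0 preserves the first argument, a0 is clobbered by the return
     * value, a1..a5 hold the remaining arguments. */
    struct pt_regs {
            unsigned long orig_a0, a0, a1, a2, a3, a4, a5;
    };

    static void syscall_get_arguments(struct pt_regs *regs, unsigned long *args)
    {
            args[0] = regs->orig_a0;
            memcpy(&args[1], &regs->a1, 5 * sizeof(args[0]));
    }

    int main(void)
    {
            struct pt_regs regs = { .orig_a0 = 10, .a1 = 11, .a2 = 12,
                                    .a3 = 13, .a4 = 14, .a5 = 15 };
            unsigned long args[6];

            syscall_get_arguments(&regs, args);
            for (int i = 0; i < 6; i++)
                    printf("arg%d = %lu\n", i, args[i]);
            return 0;
    }
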
index 439dc7072e05bf37a722bb983b69dabed39651fa..1ad8d093c58b89d7308661ea34d1011b7182dec7 100644 (file)
@@ -18,6 +18,7 @@ struct mmu_gather;
 
 static void tlb_flush(struct mmu_gather *tlb);
 
+#define tlb_flush tlb_flush
 #include <asm-generic/tlb.h>
 
 static inline void tlb_flush(struct mmu_gather *tlb)
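
The added `#define tlb_flush tlb_flush` uses the asm-generic override convention: defining a macro with the hook's own name before including the generic header announces that the architecture supplies the function, so the header skips its default. A self-contained model of the mechanism, with the `#ifndef` standing in for <asm-generic/tlb.h>:

    #include <stdio.h>

    /* Arch header: declare the hook, then claim it by name. */
    static void tlb_flush(void);
    #define tlb_flush tlb_flush

    /* Generic header (stand-in for <asm-generic/tlb.h>): provide a default
     * implementation only when no arch override was announced. */
    #ifndef tlb_flush
    static void tlb_flush(void) { puts("generic tlb_flush"); }
    #endif

    /* Arch implementation; the self-referential macro resolves to it. */
    static void tlb_flush(void) { puts("arch tlb_flush"); }

    int main(void)
    {
            tlb_flush();    /* prints "arch tlb_flush" */
            return 0;
    }
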
index a00168b980d2e6ca265ae0424045508275fbbe3f..fb53a8089e769473434493d59bc408079dcbb519 100644 (file)
@@ -300,7 +300,7 @@ do {                                                                \
                "       .balign 4\n"                            \
                "4:\n"                                          \
                "       li %0, %6\n"                            \
-               "       jump 2b, %1\n"                          \
+               "       jump 3b, %1\n"                          \
                "       .previous\n"                            \
                "       .section __ex_table,\"a\"\n"            \
                "       .balign " RISCV_SZPTR "\n"                      \
index f13f7f276639d504679034a36c53edc15f25dfe1..598568168d3511406fea38b23360c7e28a50a41f 100644 (file)
@@ -4,7 +4,6 @@
 
 ifdef CONFIG_FTRACE
 CFLAGS_REMOVE_ftrace.o = -pg
-CFLAGS_REMOVE_setup.o = -pg
 endif
 
 extra-y += head.o
@@ -29,8 +28,6 @@ obj-y += vdso.o
 obj-y  += cacheinfo.o
 obj-y  += vdso/
 
-CFLAGS_setup.o := -mcmodel=medany
-
 obj-$(CONFIG_FPU)              += fpu.o
 obj-$(CONFIG_SMP)              += smpboot.o
 obj-$(CONFIG_SMP)              += smp.o
index 7dd308129b40f1862ab04dc1e12c790bf7c111fe..2872edce894d1e0b79d58a4ed735649dd8261408 100644 (file)
@@ -141,7 +141,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
 {
        s32 hi20;
 
-       if (IS_ENABLED(CMODEL_MEDLOW)) {
+       if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) {
                pr_err(
                  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
                  me->name, (long long)v, location);
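
The one-character fix above matters because IS_ENABLED() never errors on an unknown symbol: anything that is not a macro defined to 1 silently evaluates to 0, so the bare CMODEL_MEDLOW made this branch dead code. A compressed model of the include/linux/kconfig.h machinery showing the failure mode:

    #include <stdio.h>

    /* Compressed model of include/linux/kconfig.h's helpers. */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define __is_defined(x) ___is_defined(x)
    #define IS_ENABLED(option) __is_defined(option)

    #define CONFIG_CMODEL_MEDLOW 1  /* what kconfig would emit for =y */

    int main(void)
    {
            printf("IS_ENABLED(CMODEL_MEDLOW)        = %d\n",
                   IS_ENABLED(CMODEL_MEDLOW));          /* 0: wrong symbol */
            printf("IS_ENABLED(CONFIG_CMODEL_MEDLOW) = %d\n",
                   IS_ENABLED(CONFIG_CMODEL_MEDLOW));   /* 1 */
            return 0;
    }
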
index ecb654f6a79ef105931a51950d520c1af845edff..540a331d1376922c62ba17bf0d9c786714d89948 100644 (file)
@@ -48,14 +48,6 @@ struct screen_info screen_info = {
 };
 #endif
 
-unsigned long va_pa_offset;
-EXPORT_SYMBOL(va_pa_offset);
-unsigned long pfn_base;
-EXPORT_SYMBOL(pfn_base);
-
-unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
-EXPORT_SYMBOL(empty_zero_page);
-
 /* The lucky hart to first increment this variable will boot the other cores */
 atomic_t hart_lottery;
 unsigned long boot_cpu_hartid;
index a4b1d94371a0dbf6937bc0c8add512618ef1c5c6..4d403274c2e8d0436f2c74e3719cbc75fc057db8 100644 (file)
@@ -169,8 +169,6 @@ static bool save_trace(unsigned long pc, void *arg)
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
        walk_stackframe(tsk, NULL, save_trace, trace);
-       if (trace->nr_entries < trace->max_entries)
-               trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
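
This hunk, like the matching ones later in this diff, drops the ULONG_MAX end-of-trace sentinel; nr_entries alone now bounds the trace. An illustrative consumer, with struct stack_trace mocked to match the API's shape:

    #include <stdio.h>

    /* Shape of the kernel API (mocked for the demo). */
    struct stack_trace {
            unsigned int nr_entries, max_entries;
            unsigned long *entries;
    };

    /* Iterate by nr_entries; no ULONG_MAX terminator is stored anymore. */
    static void print_trace(const struct stack_trace *trace)
    {
            for (unsigned int i = 0; i < trace->nr_entries; i++)
                    printf("  [<%016lx>]\n", trace->entries[i]);
    }

    int main(void)
    {
            unsigned long pcs[4] = { 0xffffffe000202af0UL, 0xffffffe000204c3cUL };
            struct stack_trace trace = {
                    .nr_entries = 2, .max_entries = 4, .entries = pcs,
            };

            print_trace(&trace);
            return 0;
    }
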
 
index eb22ab49b3e008ec4ab677778302d5dbbea358b1..b68aac7018031cd5afe4ebb293051cbcc814969e 100644 (file)
@@ -1,3 +1,9 @@
+
+CFLAGS_init.o := -mcmodel=medany
+ifdef CONFIG_FTRACE
+CFLAGS_REMOVE_init.o = -pg
+endif
+
 obj-y += init.o
 obj-y += fault.o
 obj-y += extable.o
index b379a75ac6a6778052b9161612357ba5df620648..bc7b77e34d0920f2190c7e8c4edd18658c526703 100644 (file)
 #include <asm/pgtable.h>
 #include <asm/io.h>
 
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+                                                       __page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
+
 static void __init zone_sizes_init(void)
 {
        unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
@@ -117,6 +121,14 @@ void __init setup_bootmem(void)
                         */
                        memblock_reserve(reg->base, vmlinux_end - reg->base);
                        mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
+
+                       /*
+                        * Remove memblock from the end of usable area to the
+                        * end of region
+                        */
+                       if (reg->base + mem_size < end)
+                               memblock_remove(reg->base + mem_size,
+                                               end - reg->base - mem_size);
                }
        }
        BUG_ON(mem_size == 0);
@@ -143,6 +155,11 @@ void __init setup_bootmem(void)
        }
 }
 
+unsigned long va_pa_offset;
+EXPORT_SYMBOL(va_pa_offset);
+unsigned long pfn_base;
+EXPORT_SYMBOL(pfn_base);
+
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
 
@@ -172,6 +189,25 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
        }
 }
 
+/*
+ * setup_vm() is called from head.S with the MMU off.
+ *
+ * The following requirements must be honoured for setup_vm() to work
+ * correctly:
+ * 1) It must use PC-relative addressing for accessing kernel symbols.
+ *    To achieve this we always build with GCC cmodel=medany.
+ * 2) The compiler instrumentation for FTRACE does not work for setup_vm(),
+ *    so compiler instrumentation is disabled when FTRACE is enabled.
+ *
+ * Currently, the above requirements are honoured by using custom CFLAGS
+ * for init.o in mm/Makefile.
+ */
+
+#ifndef __riscv_cmodel_medany
+#error "setup_vm() is called from head.S before relocate so it should "
+       "not use absolute addressing."
+#endif
+
 asmlinkage void __init setup_vm(void)
 {
        extern char _start;
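
The earlier hunks in this file pick up va_pa_offset and pfn_base from setup.c; va_pa_offset is the constant behind riscv's linear-map conversions, along the lines of its __pa()/__va() helpers. A toy round trip with an invented offset (the kernel computes the real value at boot):

    #include <stdio.h>

    /* Invented for the demo; the kernel derives this in early boot. */
    static unsigned long va_pa_offset = 0xffffffe000000000UL - 0x80000000UL;

    #define __va(pa)        ((void *)((unsigned long)(pa) + va_pa_offset))
    #define __pa(va)        ((unsigned long)(va) - va_pa_offset)

    int main(void)
    {
            unsigned long pa = 0x80200000UL; /* common RISC-V load address */
            void *va = __va(pa);

            printf("pa %#lx -> va %p -> pa %#lx\n", pa, va, __pa(va));
            return 0;
    }
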
index b6e3d0653002af7a8eb20b5a0f3e0d8b27ba0b16..97b555e772d70e86cbd2879ffafa8a7c6b666b59 100644 (file)
@@ -14,12 +14,6 @@ config LOCKDEP_SUPPORT
 config STACKTRACE_SUPPORT
        def_bool y
 
-config RWSEM_GENERIC_SPINLOCK
-       bool
-
-config RWSEM_XCHGADD_ALGORITHM
-       def_bool y
-
 config ARCH_HAS_ILOG2_U32
        def_bool n
 
@@ -164,11 +158,13 @@ config S390
        select HAVE_PERF_USER_STACK_DUMP
        select HAVE_MEMBLOCK_NODE_MAP
        select HAVE_MEMBLOCK_PHYS_MAP
+       select HAVE_MMU_GATHER_NO_GATHER
        select HAVE_MOD_ARCH_SPECIFIC
        select HAVE_NOP_MCOUNT
        select HAVE_OPROFILE
        select HAVE_PCI
        select HAVE_PERF_EVENTS
+       select HAVE_RCU_TABLE_FREE
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_RSEQ
        select HAVE_SYSCALL_TRACEPOINTS
index 4cb771ba13fa7fb39da31ac6a8428744c8026d16..5d316fe40480446b9dd5f90fc0bb4f3bba6d3b55 100644 (file)
@@ -25,7 +25,7 @@ static void *mem_detect_alloc_extended(void)
 {
        unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));
 
-       if (IS_ENABLED(BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
+       if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
            INITRD_START < offset + ENTRIES_EXTENDED_MAX)
                offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));
 
index 12d77cb11fe5a96269a7bde5fe6b8c6f11a23ea8..d5fadefea33ca862c3074e070669e6ff65d95a8f 100644 (file)
@@ -20,7 +20,6 @@ generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
-generic-y += rwsem.h
 generic-y += trace_clock.h
 generic-y += unaligned.h
 generic-y += word-at-a-time.h
index 1a6a7092d94209d4ee330003cfd3d2ccf713b916..e94a0a28b5ebe22b944ea73b1ac48bdcf52d9e63 100644 (file)
@@ -360,4 +360,15 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
        return reg1;
 }
 
+/*
+ * Interface to tell the AP bus code that a configuration
+ * change has happened. The bus code should at least do
+ * an ap bus resource rescan.
+ */
+#if IS_ENABLED(CONFIG_ZCRYPT)
+void ap_bus_cfg_chg(void);
+#else
+static inline void ap_bus_cfg_chg(void) { }
+#endif
+
 #endif /* _ASM_S390_AP_H_ */
index 7d22a474a040ddd3d0e76c84075db6ab17bb2263..f74639a05f0ffc33f638c264af58c48933e36139 100644 (file)
@@ -252,11 +252,14 @@ do {                                                              \
 
 /*
  * Cache aliasing on the latest machines calls for a mapping granularity
- * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
- * of up to 1GB. For 31-bit processes the virtual address space is limited,
- * use no alignment and limit the randomization to 8MB.
+ * of 512KB for the anonymous mapping base. For 64-bit processes use a
+ * 512KB alignment and a randomization of up to 1GB. For 31-bit processes
+ * the virtual address space is limited, use no alignment and limit the
+ * randomization to 8MB.
+ * For the additional randomization of the program break use 32MB for
+ * 64-bit and 8MB for 31-bit.
  */
-#define BRK_RND_MASK   (is_compat_task() ? 0x7ffUL : 0x3ffffUL)
+#define BRK_RND_MASK   (is_compat_task() ? 0x7ffUL : 0x1fffUL)
 #define MMAP_RND_MASK  (is_compat_task() ? 0x7ffUL : 0x3ff80UL)
 #define MMAP_ALIGN_MASK        (is_compat_task() ? 0 : 0x7fUL)
 #define STACK_RND_MASK MMAP_RND_MASK
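
Checking the comment's numbers: the masks count pages, so each randomization window is (mask + 1) pages of 4 KiB. A quick verification of the 32MB/8MB claims and the old 1GB window:

    #include <stdio.h>

    int main(void)
    {
            unsigned long page = 4096;

            /* new 64-bit brk mask 0x1fff -> 32 MiB; 31-bit 0x7ff -> 8 MiB */
            printf("64-bit brk window: %lu MiB\n", (0x1fffUL + 1) * page >> 20);
            printf("31-bit brk window: %lu MiB\n", (0x7ffUL + 1) * page >> 20);
            /* the old 64-bit brk mask 0x3ffff covered the full 1 GiB range */
            printf("old 64-bit window: %lu MiB\n", (0x3ffffUL + 1) * page >> 20);
            return 0;
    }
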
index cc0947e08b6ffef09419a52eb04f817535016127..5b9f10b1e55dec03c2878a6ab510cb0d128002e5 100644 (file)
@@ -91,52 +91,53 @@ struct lowcore {
        __u64   hardirq_timer;                  /* 0x02e8 */
        __u64   softirq_timer;                  /* 0x02f0 */
        __u64   steal_timer;                    /* 0x02f8 */
-       __u64   last_update_timer;              /* 0x0300 */
-       __u64   last_update_clock;              /* 0x0308 */
-       __u64   int_clock;                      /* 0x0310 */
-       __u64   mcck_clock;                     /* 0x0318 */
-       __u64   clock_comparator;               /* 0x0320 */
-       __u64   boot_clock[2];                  /* 0x0328 */
+       __u64   avg_steal_timer;                /* 0x0300 */
+       __u64   last_update_timer;              /* 0x0308 */
+       __u64   last_update_clock;              /* 0x0310 */
+       __u64   int_clock;                      /* 0x0318 */
+       __u64   mcck_clock;                     /* 0x0320 */
+       __u64   clock_comparator;               /* 0x0328 */
+       __u64   boot_clock[2];                  /* 0x0330 */
 
        /* Current process. */
-       __u64   current_task;                   /* 0x0338 */
-       __u64   kernel_stack;                   /* 0x0340 */
+       __u64   current_task;                   /* 0x0340 */
+       __u64   kernel_stack;                   /* 0x0348 */
 
        /* Interrupt, DAT-off and restartstack. */
-       __u64   async_stack;                    /* 0x0348 */
-       __u64   nodat_stack;                    /* 0x0350 */
-       __u64   restart_stack;                  /* 0x0358 */
+       __u64   async_stack;                    /* 0x0350 */
+       __u64   nodat_stack;                    /* 0x0358 */
+       __u64   restart_stack;                  /* 0x0360 */
 
        /* Restart function and parameter. */
-       __u64   restart_fn;                     /* 0x0360 */
-       __u64   restart_data;                   /* 0x0368 */
-       __u64   restart_source;                 /* 0x0370 */
+       __u64   restart_fn;                     /* 0x0368 */
+       __u64   restart_data;                   /* 0x0370 */
+       __u64   restart_source;                 /* 0x0378 */
 
        /* Address space pointer. */
-       __u64   kernel_asce;                    /* 0x0378 */
-       __u64   user_asce;                      /* 0x0380 */
-       __u64   vdso_asce;                      /* 0x0388 */
+       __u64   kernel_asce;                    /* 0x0380 */
+       __u64   user_asce;                      /* 0x0388 */
+       __u64   vdso_asce;                      /* 0x0390 */
 
        /*
         * The lpp and current_pid fields form a
         * 64-bit value that is set as program
         * parameter with the LPP instruction.
         */
-       __u32   lpp;                            /* 0x0390 */
-       __u32   current_pid;                    /* 0x0394 */
+       __u32   lpp;                            /* 0x0398 */
+       __u32   current_pid;                    /* 0x039c */
 
        /* SMP info area */
-       __u32   cpu_nr;                         /* 0x0398 */
-       __u32   softirq_pending;                /* 0x039c */
-       __u32   preempt_count;                  /* 0x03a0 */
-       __u32   spinlock_lockval;               /* 0x03a4 */
-       __u32   spinlock_index;                 /* 0x03a8 */
-       __u32   fpu_flags;                      /* 0x03ac */
-       __u64   percpu_offset;                  /* 0x03b0 */
-       __u64   vdso_per_cpu_data;              /* 0x03b8 */
-       __u64   machine_flags;                  /* 0x03c0 */
-       __u64   gmap;                           /* 0x03c8 */
-       __u8    pad_0x03d0[0x0400-0x03d0];      /* 0x03d0 */
+       __u32   cpu_nr;                         /* 0x03a0 */
+       __u32   softirq_pending;                /* 0x03a4 */
+       __u32   preempt_count;                  /* 0x03a8 */
+       __u32   spinlock_lockval;               /* 0x03ac */
+       __u32   spinlock_index;                 /* 0x03b0 */
+       __u32   fpu_flags;                      /* 0x03b4 */
+       __u64   percpu_offset;                  /* 0x03b8 */
+       __u64   vdso_per_cpu_data;              /* 0x03c0 */
+       __u64   machine_flags;                  /* 0x03c8 */
+       __u64   gmap;                           /* 0x03d0 */
+       __u8    pad_0x03d8[0x0400-0x03d8];      /* 0x03d8 */
 
        /* br %r1 trampoline */
        __u16   br_r1_trampoline;               /* 0x0400 */
index 96f9a9151fde02fc6f76633d76d292f47512d364..59c3e91f2cdb6636023eefc4b3a1dd7507f3b2f6 100644 (file)
@@ -56,40 +56,32 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
        unsigned long mask = -1UL;
+       unsigned int n = 6;
 
-       /*
-        * No arguments for this syscall, there's nothing to do.
-        */
-       if (!n)
-               return;
-
-       BUG_ON(i + n > 6);
 #ifdef CONFIG_COMPAT
        if (test_tsk_thread_flag(task, TIF_31BIT))
                mask = 0xffffffff;
 #endif
        while (n-- > 0)
-               if (i + n > 0)
-                       args[n] = regs->gprs[2 + i + n] & mask;
-       if (i == 0)
-               args[0] = regs->orig_gpr2 & mask;
+               if (n > 0)
+                       args[n] = regs->gprs[2 + n] & mask;
+
+       args[0] = regs->orig_gpr2 & mask;
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
+       unsigned int n = 6;
+
        while (n-- > 0)
-               if (i + n > 0)
-                       regs->gprs[2 + i + n] = args[n];
-       if (i == 0)
-               regs->orig_gpr2 = args[0];
+               if (n > 0)
+                       regs->gprs[2 + n] = args[n];
+       regs->orig_gpr2 = args[0];
 }
 
 static inline int syscall_get_arch(void)
index b31c779cf58176ad3bf91ee816053cbcf40b3476..aa406c05a350589567699bfb304e1dfdbe287d25 100644 (file)
  * Pages used for the page tables is a different story. FIXME: more
  */
 
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/swap.h>
-#include <asm/processor.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-
-struct mmu_gather {
-       struct mm_struct *mm;
-       struct mmu_table_batch *batch;
-       unsigned int fullmm;
-       unsigned long start, end;
-};
-
-struct mmu_table_batch {
-       struct rcu_head         rcu;
-       unsigned int            nr;
-       void                    *tables[0];
-};
-
-#define MAX_TABLE_BATCH                \
-       ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
-
-extern void tlb_table_flush(struct mmu_gather *tlb);
-extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-                       unsigned long start, unsigned long end)
-{
-       tlb->mm = mm;
-       tlb->start = start;
-       tlb->end = end;
-       tlb->fullmm = !(start | (end+1));
-       tlb->batch = NULL;
-}
-
-static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
-       __tlb_flush_mm_lazy(tlb->mm);
-}
-
-static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
-       tlb_table_flush(tlb);
-}
-
+void __tlb_remove_table(void *_table);
+static inline void tlb_flush(struct mmu_gather *tlb);
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+                                         struct page *page, int page_size);
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
-       tlb_flush_mmu_tlbonly(tlb);
-       tlb_flush_mmu_free(tlb);
-}
+#define tlb_start_vma(tlb, vma)                        do { } while (0)
+#define tlb_end_vma(tlb, vma)                  do { } while (0)
 
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
-               unsigned long start, unsigned long end, bool force)
-{
-       if (force) {
-               tlb->start = start;
-               tlb->end = end;
-       }
+#define tlb_flush tlb_flush
+#define pte_free_tlb pte_free_tlb
+#define pmd_free_tlb pmd_free_tlb
+#define p4d_free_tlb p4d_free_tlb
+#define pud_free_tlb pud_free_tlb
 
-       tlb_flush_mmu(tlb);
-}
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm-generic/tlb.h>
 
 /*
  * Release the page cache reference for a pte removed by
  * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
  * has already been freed, so just do free_page_and_swap_cache.
  */
-static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-       free_page_and_swap_cache(page);
-       return false; /* avoid calling tlb_flush_mmu */
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-       free_page_and_swap_cache(page);
-}
-
 static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
                                          struct page *page, int page_size)
 {
-       return __tlb_remove_page(tlb, page);
+       free_page_and_swap_cache(page);
+       return false;
 }
 
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
-                                       struct page *page, int page_size)
+static inline void tlb_flush(struct mmu_gather *tlb)
 {
-       return tlb_remove_page(tlb, page);
+       __tlb_flush_mm_lazy(tlb->mm);
 }
 
 /*
@@ -121,8 +62,17 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
  * page table from the tlb.
  */
 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
-                               unsigned long address)
+                                unsigned long address)
 {
+       __tlb_adjust_range(tlb, address, PAGE_SIZE);
+       tlb->mm->context.flush_mm = 1;
+       tlb->freed_tables = 1;
+       tlb->cleared_ptes = 1;
+       /*
+        * page_table_free_rcu takes care of the allocation bit masks
+        * of the 2K table fragments in the 4K page table page,
+        * then calls tlb_remove_table.
+        */
        page_table_free_rcu(tlb, (unsigned long *) pte, address);
 }
 
@@ -139,6 +89,10 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
        if (mm_pmd_folded(tlb->mm))
                return;
        pgtable_pmd_page_dtor(virt_to_page(pmd));
+       __tlb_adjust_range(tlb, address, PAGE_SIZE);
+       tlb->mm->context.flush_mm = 1;
+       tlb->freed_tables = 1;
+       tlb->cleared_puds = 1;
        tlb_remove_table(tlb, pmd);
 }
 
@@ -154,6 +108,10 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
 {
        if (mm_p4d_folded(tlb->mm))
                return;
+       __tlb_adjust_range(tlb, address, PAGE_SIZE);
+       tlb->mm->context.flush_mm = 1;
+       tlb->freed_tables = 1;
+       tlb->cleared_p4ds = 1;
        tlb_remove_table(tlb, p4d);
 }
 
@@ -169,21 +127,11 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 {
        if (mm_pud_folded(tlb->mm))
                return;
+       tlb->mm->context.flush_mm = 1;
+       tlb->freed_tables = 1;
+       tlb->cleared_puds = 1;
        tlb_remove_table(tlb, pud);
 }
 
-#define tlb_start_vma(tlb, vma)                        do { } while (0)
-#define tlb_end_vma(tlb, vma)                  do { } while (0)
-#define tlb_remove_tlb_entry(tlb, ptep, addr)  do { } while (0)
-#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr)      do { } while (0)
-#define tlb_migrate_finish(mm)                 do { } while (0)
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)       \
-       tlb_remove_tlb_entry(tlb, ptep, address)
-
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
-                                                    unsigned int page_size)
-{
-}
 
 #endif /* _S390_TLB_H */
index 594464f2129d4706fc4786d2de55d7c73974c97c..0da378e2eb25edcfee1f787b50eb900947b2ffc4 100644 (file)
@@ -23,7 +23,7 @@ void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
 
        if (flags & KERNEL_FPC)
                /* Save floating point control */
-               asm volatile("stfpc %0" : "=m" (state->fpc));
+               asm volatile("stfpc %0" : "=Q" (state->fpc));
 
        if (!MACHINE_HAS_VX) {
                if (flags & KERNEL_VXR_V0V7) {
index bdddaae9655984dfbf59ccee386c7bb3608cb183..649135cbedd5c4407f16d89f4020d5022f40395e 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/module.h>
 #include <linux/device.h>
+#include <linux/cpu.h>
 #include <asm/nospec-branch.h>
 
 static int __init nobp_setup_early(char *str)
@@ -58,7 +59,7 @@ early_param("nospectre_v2", nospectre_v2_setup_early);
 
 void __init nospec_auto_detect(void)
 {
-       if (test_facility(156)) {
+       if (test_facility(156) || cpu_mitigations_off()) {
                /*
                 * The machine supports etokens.
                 * Disable expolines and disable nobp.
index c6fad208c2fa5a8ffaad40d554c7597097d3e4fa..b6854812d2ed56f11cbd03865c16b26290518611 100644 (file)
@@ -196,23 +196,30 @@ static void cf_diag_perf_event_destroy(struct perf_event *event)
  */
 static int __hw_perf_event_init(struct perf_event *event)
 {
-       struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
        struct perf_event_attr *attr = &event->attr;
+       struct cpu_cf_events *cpuhw;
        enum cpumf_ctr_set i;
        int err = 0;
 
-       debug_sprintf_event(cf_diag_dbg, 5,
-                           "%s event %p cpu %d authorized %#x\n", __func__,
-                           event, event->cpu, cpuhw->info.auth_ctl);
+       debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__,
+                           event, event->cpu);
 
        event->hw.config = attr->config;
        event->hw.config_base = 0;
-       local64_set(&event->count, 0);
 
-       /* Add all authorized counter sets to config_base */
+       /* Add all authorized counter sets to config_base. The hardware
+        * init function is either called per CPU or just once for all
+        * CPUs (event->cpu == -1). This depends on whether counting is
+        * started for all CPUs or on a per-workload basis, where the
+        * perf event moves from one CPU to another.
+        * Checking the authorization on any CPU is fine as the hardware
+        * applies the same authorization settings to all CPUs.
+        */
+       cpuhw = &get_cpu_var(cpu_cf_events);
        for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
                if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
                        event->hw.config_base |= cpumf_ctr_ctl[i];
+       put_cpu_var(cpu_cf_events);
 
        /* No authorized counter sets, nothing to count/sample */
        if (!event->hw.config_base) {
index 3fe1c77c361b98a9a4443bf1a2941f486d024030..bd197baf1dc337f018af35eeb19635b1c95998b7 100644 (file)
@@ -266,7 +266,8 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
        lc->percpu_offset = __per_cpu_offset[cpu];
        lc->kernel_asce = S390_lowcore.kernel_asce;
        lc->machine_flags = S390_lowcore.machine_flags;
-       lc->user_timer = lc->system_timer = lc->steal_timer = 0;
+       lc->user_timer = lc->system_timer =
+               lc->steal_timer = lc->avg_steal_timer = 0;
        __ctl_store(lc->cregs_save_area, 0, 15);
        save_access_regs((unsigned int *) lc->access_regs_save_area);
        memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
index 460dcfba7d4ec08db7de61942ea387ef38579a99..cc9ed97870683afe706da93d692aa36748800895 100644 (file)
@@ -45,8 +45,6 @@ void save_stack_trace(struct stack_trace *trace)
 
        sp = current_stack_pointer();
        dump_trace(save_address, trace, NULL, sp);
-       if (trace->nr_entries < trace->max_entries)
-               trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);
 
@@ -58,8 +56,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
        if (tsk == current)
                sp = current_stack_pointer();
        dump_trace(save_address_nosched, trace, tsk, sp);
-       if (trace->nr_entries < trace->max_entries)
-               trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
 
@@ -69,7 +65,5 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
 
        sp = kernel_stack_pointer(regs);
        dump_trace(save_address, trace, NULL, sp);
-       if (trace->nr_entries < trace->max_entries)
-               trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_regs);
index 02579f95f391b6524ddd28004cc6d6a511be974b..061418f787c3712f4091cfeb94b8dfb5d2b1eb03 100644 (file)
 421    32      rt_sigtimedwait_time64  -                               compat_sys_rt_sigtimedwait_time64
 422    32      futex_time64            -                               sys_futex
 423    32      sched_rr_get_interval_time64    -                       sys_sched_rr_get_interval
+424    common  pidfd_send_signal       sys_pidfd_send_signal           sys_pidfd_send_signal
+425    common  io_uring_setup          sys_io_uring_setup              sys_io_uring_setup
+426    common  io_uring_enter          sys_io_uring_enter              sys_io_uring_enter
+427    common  io_uring_register       sys_io_uring_register           sys_io_uring_register
index 98f850e00008e99a1e64e8f20a74bbaaf4910636..c475ca49cfc6b43c02ab924e218e541a92b677b8 100644 (file)
@@ -37,7 +37,7 @@ static inline u64 get_vtimer(void)
 {
        u64 timer;
 
-       asm volatile("stpt %0" : "=m" (timer));
+       asm volatile("stpt %0" : "=Q" (timer));
        return timer;
 }
 
@@ -48,7 +48,7 @@ static inline void set_vtimer(u64 expires)
        asm volatile(
                "       stpt    %0\n"   /* Store current cpu timer value */
                "       spt     %1"     /* Set new value imm. afterwards */
-               : "=m" (timer) : "m" (expires));
+               : "=Q" (timer) : "Q" (expires));
        S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
        S390_lowcore.last_update_timer = expires;
 }
@@ -124,7 +124,7 @@ static void account_system_index_scaled(struct task_struct *p, u64 cputime,
  */
 static int do_account_vtime(struct task_struct *tsk)
 {
-       u64 timer, clock, user, guest, system, hardirq, softirq, steal;
+       u64 timer, clock, user, guest, system, hardirq, softirq;
 
        timer = S390_lowcore.last_update_timer;
        clock = S390_lowcore.last_update_clock;
@@ -135,8 +135,8 @@ static int do_account_vtime(struct task_struct *tsk)
 #else
                "       stck    %1"     /* Store current tod clock value */
 #endif
-               : "=m" (S390_lowcore.last_update_timer),
-                 "=m" (S390_lowcore.last_update_clock));
+               : "=Q" (S390_lowcore.last_update_timer),
+                 "=Q" (S390_lowcore.last_update_clock));
        clock = S390_lowcore.last_update_clock - clock;
        timer -= S390_lowcore.last_update_timer;
 
@@ -182,12 +182,6 @@ static int do_account_vtime(struct task_struct *tsk)
        if (softirq)
                account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ);
 
-       steal = S390_lowcore.steal_timer;
-       if ((s64) steal > 0) {
-               S390_lowcore.steal_timer = 0;
-               account_steal_time(cputime_to_nsecs(steal));
-       }
-
        return virt_timer_forward(user + guest + system + hardirq + softirq);
 }
 
@@ -213,8 +207,19 @@ void vtime_task_switch(struct task_struct *prev)
  */
 void vtime_flush(struct task_struct *tsk)
 {
+       u64 steal, avg_steal;
+
        if (do_account_vtime(tsk))
                virt_timer_expire();
+
+       steal = S390_lowcore.steal_timer;
+       avg_steal = S390_lowcore.avg_steal_timer / 2;
+       if ((s64) steal > 0) {
+               S390_lowcore.steal_timer = 0;
+               account_steal_time(steal);
+               avg_steal += steal;
+       }
+       S390_lowcore.avg_steal_timer = avg_steal;
 }
 
 /*
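
The new avg_steal_timer is an exponential moving average with a decay of one half per flush: avg_n = avg_(n-1)/2 + steal_n, with the sample only added when positive. A standalone run of exactly that update rule on a synthetic steal-time series:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long samples[] = { 0, 0, 800, 800, 0, 0, 0 };
            unsigned long long avg = 0;

            for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                    avg /= 2;               /* decay the running average */
                    if (samples[i] > 0)
                            avg += samples[i];
                    printf("steal=%llu avg=%llu\n", samples[i], avg);
            }
            return 0;
    }
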
index db6bb2f97a2c62f9334c1e1cf8ab39460c15acf0..99e06213a22b7c259ba90831c4cf7cbda6d1a301 100644 (file)
@@ -290,7 +290,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
        tlb_remove_table(tlb, table);
 }
 
-static void __tlb_remove_table(void *_table)
+void __tlb_remove_table(void *_table)
 {
        unsigned int mask = (unsigned long) _table & 3;
        void *table = (void *)((unsigned long) _table ^ mask);
@@ -316,67 +316,6 @@ static void __tlb_remove_table(void *_table)
        }
 }
 
-static void tlb_remove_table_smp_sync(void *arg)
-{
-       /* Simply deliver the interrupt */
-}
-
-static void tlb_remove_table_one(void *table)
-{
-       /*
-        * This isn't an RCU grace period and hence the page-tables cannot be
-        * assumed to be actually RCU-freed.
-        *
-        * It is however sufficient for software page-table walkers that rely
-        * on IRQ disabling. See the comment near struct mmu_table_batch.
-        */
-       smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
-       __tlb_remove_table(table);
-}
-
-static void tlb_remove_table_rcu(struct rcu_head *head)
-{
-       struct mmu_table_batch *batch;
-       int i;
-
-       batch = container_of(head, struct mmu_table_batch, rcu);
-
-       for (i = 0; i < batch->nr; i++)
-               __tlb_remove_table(batch->tables[i]);
-
-       free_page((unsigned long)batch);
-}
-
-void tlb_table_flush(struct mmu_gather *tlb)
-{
-       struct mmu_table_batch **batch = &tlb->batch;
-
-       if (*batch) {
-               call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
-               *batch = NULL;
-       }
-}
-
-void tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
-       struct mmu_table_batch **batch = &tlb->batch;
-
-       tlb->mm->context.flush_mm = 1;
-       if (*batch == NULL) {
-               *batch = (struct mmu_table_batch *)
-                       __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
-               if (*batch == NULL) {
-                       __tlb_flush_mm_lazy(tlb->mm);
-                       tlb_remove_table_one(table);
-                       return;
-               }
-               (*batch)->nr = 0;
-       }
-       (*batch)->tables[(*batch)->nr++] = table;
-       if ((*batch)->nr == MAX_TABLE_BATCH)
-               tlb_flush_mmu(tlb);
-}
-
 /*
  * Base infrastructure required to generate basic asces, region, segment,
  * and page tables that do not make use of enhanced features like EDAT1.
index b1c91ea9a958e939da0a91d720346f6d229a384c..0be08d586d40c64ee7db194210048a7724480ba6 100644 (file)
@@ -90,12 +90,6 @@ config ARCH_DEFCONFIG
        default "arch/sh/configs/shx3_defconfig" if SUPERH32
        default "arch/sh/configs/cayman_defconfig" if SUPERH64
 
-config RWSEM_GENERIC_SPINLOCK
-       def_bool y
-
-config RWSEM_XCHGADD_ALGORITHM
-       bool
-
 config GENERIC_BUG
        def_bool y
        depends on BUG && SUPERH32
index 958f46da3a7912cfd94a7b517e8e88b88fec3a5a..d91065e81a4e5cffcb2b86463c8dba9190c0d7fa 100644 (file)
@@ -164,10 +164,10 @@ static struct sh_machine_vector __initmv sh_of_generic_mv = {
 
 struct sh_clk_ops;
 
-void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+void __init __weak arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
 {
 }
 
-void __init plat_irq_setup(void)
+void __init __weak plat_irq_setup(void)
 {
 }
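
Marking these stubs __weak lets a board file supply its own strong definitions without duplicate-symbol link errors; the linker always prefers a strong symbol over a weak one. A single-file sketch of the pattern (with only the weak definition present, it is the one used):

    #include <stdio.h>

    /* Weak default, as the generic board code now provides. */
    void plat_irq_setup(void) __attribute__((weak));
    void plat_irq_setup(void)
    {
            puts("weak default: nothing to set up");
    }

    /* A board file could define a strong plat_irq_setup() elsewhere;
     * the linker would then pick it over this default. */
    int main(void)
    {
            plat_irq_setup();
            return 0;
    }
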
index a6ef3fee5f85714f69e6692e563491960372ab75..73fff39a0122f0405f0940036a09cc4283946d44 100644 (file)
@@ -9,6 +9,7 @@ generic-y += emergency-restart.h
 generic-y += exec.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
@@ -16,7 +17,6 @@ generic-y += mm-arch-hooks.h
 generic-y += parport.h
 generic-y += percpu.h
 generic-y += preempt.h
-generic-y += rwsem.h
 generic-y += serial.h
 generic-y += sizes.h
 generic-y += trace_clock.h
index 8ad73cb311216a0a8801ee3890ed27788a79ad61..b56f908b13950e31335984ec09c2f88973915c90 100644 (file)
@@ -70,6 +70,15 @@ do {                                                 \
        tlb_remove_page((tlb), (pte));                  \
 } while (0)
 
+#if CONFIG_PGTABLE_LEVELS > 2
+#define __pmd_free_tlb(tlb, pmdp, addr)                        \
+do {                                                   \
+       struct page *page = virt_to_page(pmdp);         \
+       pgtable_pmd_page_dtor(page);                    \
+       tlb_remove_page((tlb), page);                   \
+} while (0)
+#endif
+
 static inline void check_pgt_cache(void)
 {
        quicklist_trim(QUICK_PT, NULL, 25, 16);
index 6e118799831c32dc37b8cf21960d284c5bec3646..8c9d7e5e5dcc02375eeafab25e47878b76239aaa 100644 (file)
@@ -48,51 +48,28 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       /*
-        * Do this simply for now. If we need to start supporting
-        * fetching arguments from arbitrary indices, this will need some
-        * extra logic. Presently there are no in-tree users that depend
-        * on this behaviour.
-        */
-       BUG_ON(i);
 
        /* Argument pattern is: R4, R5, R6, R7, R0, R1 */
-       switch (n) {
-       case 6: args[5] = regs->regs[1];
-       case 5: args[4] = regs->regs[0];
-       case 4: args[3] = regs->regs[7];
-       case 3: args[2] = regs->regs[6];
-       case 2: args[1] = regs->regs[5];
-       case 1: args[0] = regs->regs[4];
-       case 0:
-               break;
-       default:
-               BUG();
-       }
+       args[5] = regs->regs[1];
+       args[4] = regs->regs[0];
+       args[3] = regs->regs[7];
+       args[2] = regs->regs[6];
+       args[1] = regs->regs[5];
+       args[0] = regs->regs[4];
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       /* Same note as above applies */
-       BUG_ON(i);
-
-       switch (n) {
-       case 6: regs->regs[1] = args[5];
-       case 5: regs->regs[0] = args[4];
-       case 4: regs->regs[7] = args[3];
-       case 3: regs->regs[6] = args[2];
-       case 2: regs->regs[5] = args[1];
-       case 1: regs->regs[4] = args[0];
-               break;
-       default:
-               BUG();
-       }
+       regs->regs[1] = args[5];
+       regs->regs[0] = args[4];
+       regs->regs[7] = args[3];
+       regs->regs[6] = args[2];
+       regs->regs[5] = args[1];
+       regs->regs[4] = args[0];
 }
 
 static inline int syscall_get_arch(void)
index 43882580c7f99bec93e519f1b4182c1daad2fbf0..22fad97da06619a137f6f4cd3e3ca4f6a9bddfcc 100644 (file)
@@ -47,20 +47,16 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       memcpy(args, &regs->regs[2 + i], n * sizeof(args[0]));
+       memcpy(args, &regs->regs[2], 6 * sizeof(args[0]));
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       memcpy(&regs->regs[2 + i], args, n * sizeof(args[0]));
+       memcpy(&regs->regs[2], args, 6 * sizeof(args[0]));
 }
 
 static inline int syscall_get_arch(void)
index 77abe192fb43d90cd6d56bfe878b54126188ac34..bc77f3dd4261da2213368c182cc6bc327b785d98 100644 (file)
 
 #ifdef CONFIG_MMU
 #include <linux/swap.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-#include <asm/mmu_context.h>
 
-/*
- * TLB handling.  This allows us to remove pages from the page
- * tables, and efficiently handle the TLB issues.
- */
-struct mmu_gather {
-       struct mm_struct        *mm;
-       unsigned int            fullmm;
-       unsigned long           start, end;
-};
-
-static inline void init_tlb_gather(struct mmu_gather *tlb)
-{
-       tlb->start = TASK_SIZE;
-       tlb->end = 0;
-
-       if (tlb->fullmm) {
-               tlb->start = 0;
-               tlb->end = TASK_SIZE;
-       }
-}
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-               unsigned long start, unsigned long end)
-{
-       tlb->mm = mm;
-       tlb->start = start;
-       tlb->end = end;
-       tlb->fullmm = !(start | (end+1));
-
-       init_tlb_gather(tlb);
-}
-
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
-               unsigned long start, unsigned long end, bool force)
-{
-       if (tlb->fullmm || force)
-               flush_tlb_mm(tlb->mm);
-
-       /* keep the page table cache within bounds */
-       check_pgt_cache();
-}
-
-static inline void
-tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
-{
-       if (tlb->start > address)
-               tlb->start = address;
-       if (tlb->end < address + PAGE_SIZE)
-               tlb->end = address + PAGE_SIZE;
-}
-
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)       \
-       tlb_remove_tlb_entry(tlb, ptep, address)
-
-/*
- * In the case of tlb vma handling, we can optimise these away in the
- * case where we're doing a full MM flush.  When we're doing a munmap,
- * the vmas are adjusted to only cover the region to be torn down.
- */
-static inline void
-tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
-       if (!tlb->fullmm)
-               flush_cache_range(vma, vma->vm_start, vma->vm_end);
-}
-
-static inline void
-tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
-       if (!tlb->fullmm && tlb->end) {
-               flush_tlb_range(vma, tlb->start, tlb->end);
-               init_tlb_gather(tlb);
-       }
-}
-
-static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
-}
-
-static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
-}
-
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
-}
-
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-       free_page_and_swap_cache(page);
-       return false; /* avoid calling tlb_flush_mmu */
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-       __tlb_remove_page(tlb, page);
-}
-
-static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
-                                         struct page *page, int page_size)
-{
-       return __tlb_remove_page(tlb, page);
-}
-
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
-                                       struct page *page, int page_size)
-{
-       return tlb_remove_page(tlb, page);
-}
-
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
-                                                    unsigned int page_size)
-{
-}
-
-#define pte_free_tlb(tlb, ptep, addr)  pte_free((tlb)->mm, ptep)
-#define pmd_free_tlb(tlb, pmdp, addr)  pmd_free((tlb)->mm, pmdp)
-#define pud_free_tlb(tlb, pudp, addr)  pud_free((tlb)->mm, pudp)
-
-#define tlb_migrate_finish(mm)         do { } while (0)
+#include <asm-generic/tlb.h>
 
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
 extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
@@ -157,11 +32,6 @@ static inline void tlb_unwire_entry(void)
 
 #else /* CONFIG_MMU */
 
-#define tlb_start_vma(tlb, vma)                                do { } while (0)
-#define tlb_end_vma(tlb, vma)                          do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, pte, address)      do { } while (0)
-#define tlb_flush(tlb)                                 do { } while (0)
-
 #include <asm-generic/tlb.h>
 
 #endif /* CONFIG_MMU */
index ecfbd40924dd948f97985e65daa18216c123c424..b8812c74c1dee1ec8dc80bac7ec4f80a4cc454a4 100644 (file)
@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
 
 generated-y += unistd_32.h
-generic-y += kvm_para.h
 generic-y += ucontext.h
index f3cb2cccb2624de9a509082047b7c6f10f1997ee..2950b19ad077208114e13b8547d5f678649ea6fc 100644 (file)
@@ -49,8 +49,6 @@ void save_stack_trace(struct stack_trace *trace)
        unsigned long *sp = (unsigned long *)current_stack_pointer;
 
        unwind_stack(current, NULL, sp,  &save_stack_ops, trace);
-       if (trace->nr_entries < trace->max_entries)
-               trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);
 
@@ -84,7 +82,5 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
        unsigned long *sp = (unsigned long *)tsk->thread.sp;
 
        unwind_stack(current, NULL, sp,  &save_stack_ops_nosched, trace);
-       if (trace->nr_entries < trace->max_entries)
-               trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
index bfda678576e4335788f844db6ec7632fda5faedf..480b057556ee45a3871485ce7301d2436cca8255 100644 (file)
 421    common  rt_sigtimedwait_time64          sys_rt_sigtimedwait
 422    common  futex_time64                    sys_futex
 423    common  sched_rr_get_interval_time64    sys_sched_rr_get_interval
+424    common  pidfd_send_signal               sys_pidfd_send_signal
+425    common  io_uring_setup                  sys_io_uring_setup
+426    common  io_uring_enter                  sys_io_uring_enter
+427    common  io_uring_register               sys_io_uring_register
index 40f8f4f73fe8fea23c31b73d9e4441dbd67fabf1..f6421c9ce5d3f0b7590f198c394ea8d761014e81 100644 (file)
@@ -63,6 +63,7 @@ config SPARC64
        select HAVE_KRETPROBES
        select HAVE_KPROBES
        select HAVE_RCU_TABLE_FREE if SMP
+       select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
        select HAVE_MEMBLOCK_NODE_MAP
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
        select HAVE_DYNAMIC_FTRACE
@@ -191,14 +192,6 @@ config NR_CPUS
 
 source "kernel/Kconfig.hz"
 
-config RWSEM_GENERIC_SPINLOCK
-       bool
-       default y if SPARC32
-
-config RWSEM_XCHGADD_ALGORITHM
-       bool
-       default y if SPARC64
-
 config GENERIC_HWEIGHT
        bool
        default y
index b82f64e28f55c12fc36f56897bcb5e80f44c9e2d..2ca3200d3616abb950e87c7570937f46426df311 100644 (file)
@@ -9,6 +9,7 @@ generic-y += exec.h
 generic-y += export.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
+generic-y += kvm_para.h
 generic-y += linkage.h
 generic-y += local.h
 generic-y += local64.h
@@ -17,7 +18,6 @@ generic-y += mm-arch-hooks.h
 generic-y += module.h
 generic-y += msi.h
 generic-y += preempt.h
-generic-y += rwsem.h
 generic-y += serial.h
 generic-y += trace_clock.h
 generic-y += word-at-a-time.h
index 053989e3f6a6f1435323873ea010723ac09736bd..4d075434e8164c18e140249d65cbffdb28290dc6 100644 (file)
@@ -96,11 +96,11 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
        int zero_extend = 0;
        unsigned int j;
+       unsigned int n = 6;
 
 #ifdef CONFIG_SPARC64
        if (test_tsk_thread_flag(task, TIF_32BIT))
@@ -108,7 +108,7 @@ static inline void syscall_get_arguments(struct task_struct *task,
 #endif
 
        for (j = 0; j < n; j++) {
-               unsigned long val = regs->u_regs[UREG_I0 + i + j];
+               unsigned long val = regs->u_regs[UREG_I0 + j];
 
                if (zero_extend)
                        args[j] = (u32) val;
@@ -119,13 +119,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
-       unsigned int j;
+       unsigned int i;
 
-       for (j = 0; j < n; j++)
-               regs->u_regs[UREG_I0 + i + j] = args[j];
+       for (i = 0; i < 6; i++)
+               regs->u_regs[UREG_I0 + i] = args[i];
 }
 
 static inline int syscall_get_arch(void)
index 343cea19e5735b200eecb8c87c1a51ef54ae9e39..5cd28a8793e3975aef8e7389ec232068fb6feffb 100644 (file)
@@ -2,24 +2,6 @@
 #ifndef _SPARC_TLB_H
 #define _SPARC_TLB_H
 
-#define tlb_start_vma(tlb, vma) \
-do {                                                           \
-       flush_cache_range(vma, vma->vm_start, vma->vm_end);     \
-} while (0)
-
-#define tlb_end_vma(tlb, vma) \
-do {                                                           \
-       flush_tlb_range(vma, vma->vm_start, vma->vm_end);       \
-} while (0)
-
-#define __tlb_remove_tlb_entry(tlb, pte, address) \
-       do { } while (0)
-
-#define tlb_flush(tlb) \
-do {                                                           \
-       flush_tlb_mm((tlb)->mm);                                \
-} while (0)
-
 #include <asm-generic/tlb.h>
 
 #endif /* _SPARC_TLB_H */
diff --git a/arch/sparc/include/uapi/asm/kvm_para.h b/arch/sparc/include/uapi/asm/kvm_para.h
deleted file mode 100644 (file)
index baacc49..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/kvm_para.h>
index a8af6023c1263f7b43a4a52f52089235493923bd..14b93c5564e3572c07993c74217fb0b89ee36573 100644 (file)
@@ -73,6 +73,11 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns
        p->npages       = 0;
 }
 
+static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
+{
+       return iommu->atu && mask > DMA_BIT_MASK(32);
+}
+
 /* Interrupts must be disabled.  */
 static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
 {
@@ -92,7 +97,7 @@ static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
                prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
 
        while (npages != 0) {
-               if (mask <= DMA_BIT_MASK(32) || !pbm->iommu->atu) {
+               if (!iommu_use_atu(pbm->iommu, mask)) {
                        num = pci_sun4v_iommu_map(devhandle,
                                                  HV_PCI_TSBID(0, entry),
                                                  npages,
@@ -179,7 +184,6 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
        unsigned long flags, order, first_page, npages, n;
        unsigned long prot = 0;
        struct iommu *iommu;
-       struct atu *atu;
        struct iommu_map_table *tbl;
        struct page *page;
        void *ret;
@@ -205,13 +209,11 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
        memset((char *)first_page, 0, PAGE_SIZE << order);
 
        iommu = dev->archdata.iommu;
-       atu = iommu->atu;
-
        mask = dev->coherent_dma_mask;
-       if (mask <= DMA_BIT_MASK(32) || !atu)
+       if (!iommu_use_atu(iommu, mask))
                tbl = &iommu->tbl;
        else
-               tbl = &atu->tbl;
+               tbl = &iommu->atu->tbl;
 
        entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
                                      (unsigned long)(-1), 0);
@@ -333,7 +335,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
        atu = iommu->atu;
        devhandle = pbm->devhandle;
 
-       if (dvma <= DMA_BIT_MASK(32)) {
+       if (!iommu_use_atu(iommu, dvma)) {
                tbl = &iommu->tbl;
                iotsb_num = 0; /* we don't care for legacy iommu */
        } else {
@@ -374,7 +376,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
        npages >>= IO_PAGE_SHIFT;
 
        mask = *dev->dma_mask;
-       if (mask <= DMA_BIT_MASK(32))
+       if (!iommu_use_atu(iommu, mask))
                tbl = &iommu->tbl;
        else
                tbl = &atu->tbl;
@@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                                  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
 
        mask = *dev->dma_mask;
-       if (mask <= DMA_BIT_MASK(32))
+       if (!iommu_use_atu(iommu, mask))
                tbl = &iommu->tbl;
        else
                tbl = &atu->tbl;
index b9a5a04b2d2c543791088b69aae612ed56a97e5e..a1dd24307b001aa95801d3e24003ffd719711728 100644 (file)
 421    32      rt_sigtimedwait_time64          sys_rt_sigtimedwait             compat_sys_rt_sigtimedwait_time64
 422    32      futex_time64                    sys_futex                       sys_futex
 423    32      sched_rr_get_interval_time64    sys_sched_rr_get_interval       sys_sched_rr_get_interval
+424    common  pidfd_send_signal               sys_pidfd_send_signal
+425    common  io_uring_setup                  sys_io_uring_setup
+426    common  io_uring_enter                  sys_io_uring_enter
+427    common  io_uring_register               sys_io_uring_register
index 9fb9cf8cd39a3b29f45a80d1a4281d2abd82a262..98e50c50c12efb65ee100eecff49ea6a54e74853 100644 (file)
@@ -53,84 +53,30 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
        const struct uml_pt_regs *r = &regs->regs;
 
-       switch (i) {
-       case 0:
-               if (!n--)
-                       break;
-               *args++ = UPT_SYSCALL_ARG1(r);
-       case 1:
-               if (!n--)
-                       break;
-               *args++ = UPT_SYSCALL_ARG2(r);
-       case 2:
-               if (!n--)
-                       break;
-               *args++ = UPT_SYSCALL_ARG3(r);
-       case 3:
-               if (!n--)
-                       break;
-               *args++ = UPT_SYSCALL_ARG4(r);
-       case 4:
-               if (!n--)
-                       break;
-               *args++ = UPT_SYSCALL_ARG5(r);
-       case 5:
-               if (!n--)
-                       break;
-               *args++ = UPT_SYSCALL_ARG6(r);
-       case 6:
-               if (!n--)
-                       break;
-       default:
-               BUG();
-               break;
-       }
+       *args++ = UPT_SYSCALL_ARG1(r);
+       *args++ = UPT_SYSCALL_ARG2(r);
+       *args++ = UPT_SYSCALL_ARG3(r);
+       *args++ = UPT_SYSCALL_ARG4(r);
+       *args++ = UPT_SYSCALL_ARG5(r);
+       *args   = UPT_SYSCALL_ARG6(r);
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
        struct uml_pt_regs *r = &regs->regs;
 
-       switch (i) {
-       case 0:
-               if (!n--)
-                       break;
-               UPT_SYSCALL_ARG1(r) = *args++;
-       case 1:
-               if (!n--)
-                       break;
-               UPT_SYSCALL_ARG2(r) = *args++;
-       case 2:
-               if (!n--)
-                       break;
-               UPT_SYSCALL_ARG3(r) = *args++;
-       case 3:
-               if (!n--)
-                       break;
-               UPT_SYSCALL_ARG4(r) = *args++;
-       case 4:
-               if (!n--)
-                       break;
-               UPT_SYSCALL_ARG5(r) = *args++;
-       case 5:
-               if (!n--)
-                       break;
-               UPT_SYSCALL_ARG6(r) = *args++;
-       case 6:
-               if (!n--)
-                       break;
-       default:
-               BUG();
-               break;
-       }
+       UPT_SYSCALL_ARG1(r) = *args++;
+       UPT_SYSCALL_ARG2(r) = *args++;
+       UPT_SYSCALL_ARG3(r) = *args++;
+       UPT_SYSCALL_ARG4(r) = *args++;
+       UPT_SYSCALL_ARG5(r) = *args++;
+       UPT_SYSCALL_ARG6(r) = *args;
 }
 
 /* See arch/x86/um/asm/syscall.h for syscall_get_arch() definition. */
index dce6db147f24563eb14310aaabf76cada9a878bb..70ee6038390060a03dd84cbfa7e14ca249be1fd7 100644 (file)
@@ -2,162 +2,8 @@
 #ifndef __UM_TLB_H
 #define __UM_TLB_H
 
-#include <linux/pagemap.h>
-#include <linux/swap.h>
-#include <asm/percpu.h>
-#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
-
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
-/* struct mmu_gather is an opaque type used by the mm code for passing around
- * any data needed by arch specific code for tlb_remove_page.
- */
-struct mmu_gather {
-       struct mm_struct        *mm;
-       unsigned int            need_flush; /* Really unmapped some ptes? */
-       unsigned long           start;
-       unsigned long           end;
-       unsigned int            fullmm; /* non-zero means full mm flush */
-};
-
-static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
-                                         unsigned long address)
-{
-       if (tlb->start > address)
-               tlb->start = address;
-       if (tlb->end < address + PAGE_SIZE)
-               tlb->end = address + PAGE_SIZE;
-}
-
-static inline void init_tlb_gather(struct mmu_gather *tlb)
-{
-       tlb->need_flush = 0;
-
-       tlb->start = TASK_SIZE;
-       tlb->end = 0;
-
-       if (tlb->fullmm) {
-               tlb->start = 0;
-               tlb->end = TASK_SIZE;
-       }
-}
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-               unsigned long start, unsigned long end)
-{
-       tlb->mm = mm;
-       tlb->start = start;
-       tlb->end = end;
-       tlb->fullmm = !(start | (end+1));
-
-       init_tlb_gather(tlb);
-}
-
-extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
-                              unsigned long end);
-
-static inline void
-tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
-       flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
-}
-
-static inline void
-tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
-       init_tlb_gather(tlb);
-}
-
-static inline void
-tlb_flush_mmu(struct mmu_gather *tlb)
-{
-       if (!tlb->need_flush)
-               return;
-
-       tlb_flush_mmu_tlbonly(tlb);
-       tlb_flush_mmu_free(tlb);
-}
-
-/* arch_tlb_finish_mmu
- *     Called at the end of the shootdown operation to free up any resources
- *     that were required.
- */
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
-               unsigned long start, unsigned long end, bool force)
-{
-       if (force) {
-               tlb->start = start;
-               tlb->end = end;
-               tlb->need_flush = 1;
-       }
-       tlb_flush_mmu(tlb);
-
-       /* keep the page table cache within bounds */
-       check_pgt_cache();
-}
-
-/* tlb_remove_page
- *     Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
- *     while handling the additional races in SMP caused by other CPUs
- *     caching valid mappings in their TLBs.
- */
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-       tlb->need_flush = 1;
-       free_page_and_swap_cache(page);
-       return false; /* avoid calling tlb_flush_mmu */
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-       __tlb_remove_page(tlb, page);
-}
-
-static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
-                                         struct page *page, int page_size)
-{
-       return __tlb_remove_page(tlb, page);
-}
-
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
-                                       struct page *page, int page_size)
-{
-       return tlb_remove_page(tlb, page);
-}
-
-/**
- * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
- *
- * Record the fact that pte's were really umapped in ->need_flush, so we can
- * later optimise away the tlb invalidate.   This helps when userspace is
- * unmapping already-unmapped pages, which happens quite a lot.
- */
-#define tlb_remove_tlb_entry(tlb, ptep, address)               \
-       do {                                                    \
-               tlb->need_flush = 1;                            \
-               __tlb_remove_tlb_entry(tlb, ptep, address);     \
-       } while (0)
-
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)       \
-       tlb_remove_tlb_entry(tlb, ptep, address)
-
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
-                                                    unsigned int page_size)
-{
-}
-
-#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
-
-#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
-
-#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
-
-#define tlb_migrate_finish(mm) do {} while (0)
+#include <asm-generic/cacheflush.h>
+#include <asm-generic/tlb.h>
 
 #endif
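The deleted block was um's private mmu_gather implementation; switching to
<asm-generic/tlb.h> keeps the same accumulate-then-flush shape while sharing
the generic code. A self-contained sketch of that shape (all names here are
illustrative, not the kernel API):

#include <stdio.h>

struct gather {
	unsigned long start, end;
	int need_flush;
};

static void gather_init(struct gather *g)
{
	g->start = ~0UL;
	g->end = 0;
	g->need_flush = 0;
}

/* Widen the pending range instead of flushing per page. */
static void gather_page(struct gather *g, unsigned long addr, unsigned long size)
{
	if (addr < g->start)
		g->start = addr;
	if (addr + size > g->end)
		g->end = addr + size;
	g->need_flush = 1;
}

/* One combined flush at the end of the unmap operation. */
static void gather_finish(struct gather *g)
{
	if (g->need_flush)
		printf("flush range [%#lx, %#lx)\n", g->start, g->end);
	gather_init(g);
}

int main(void)
{
	struct gather g;

	gather_init(&g);
	gather_page(&g, 0x1000, 0x1000);
	gather_page(&g, 0x5000, 0x1000);
	gather_finish(&g);	/* one combined flush instead of two */
	return 0;
}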
index ebe7bcf62684c5312aaec16bbe5cadd4f13ff595..bd95e020d5091858b3ec377ebca5e1127c59a956 100644 (file)
@@ -63,8 +63,6 @@ static const struct stacktrace_ops dump_ops = {
 static void __save_stack_trace(struct task_struct *tsk, struct stack_trace *trace)
 {
        dump_trace(tsk, &dump_ops, trace);
-       if (trace->nr_entries < trace->max_entries)
-               trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 
 void save_stack_trace(struct stack_trace *trace)
index 817d82608712ab6f603edd6514c16c2744b08584..2445dfcf64446bd1fbcadb4fe75bab9c6ea62efd 100644 (file)
@@ -20,6 +20,7 @@ config UNICORE32
        select GENERIC_IOMAP
        select MODULES_USE_ELF_REL
        select NEED_DMA_MAP_STATE
+       select MMU_GATHER_NO_RANGE if MMU
        help
          UniCore-32 is 32-bit Instruction Set Architecture,
          including a series of low-power-consumption RISC chip
@@ -38,12 +39,6 @@ config STACKTRACE_SUPPORT
 config LOCKDEP_SUPPORT
        def_bool y
 
-config RWSEM_GENERIC_SPINLOCK
-       def_bool y
-
-config RWSEM_XCHGADD_ALGORITHM
-       bool
-
 config ARCH_HAS_ILOG2_U32
        bool
 
index 1d1544b6ca74ce96e765c68cfa2f1a86a7ef7ac8..d77d953c04c1cfbe039bf207fa4db8b362e65f22 100644 (file)
@@ -18,6 +18,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
index 9cca15cdae94c706508968a5131acadbba4ca4e6..00a8477333f6db4d745d4da8ef3f7cb28807b838 100644 (file)
 #ifndef __UNICORE_TLB_H__
 #define __UNICORE_TLB_H__
 
-#define tlb_start_vma(tlb, vma)                                do { } while (0)
-#define tlb_end_vma(tlb, vma)                          do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address)     do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+/*
+ * unicore32 lacks an efficient flush_tlb_range(), so flush_tlb_mm() is used instead.
+ */
 
 #define __pte_free_tlb(tlb, pte, addr)                         \
        do {                                                    \
index 755bb11323d8feb92d12349bf444f151cd8acc29..1c72f04ff75da1a7f6918f00b14116a183a79313 100644 (file)
@@ -1,2 +1 @@
-generic-y += kvm_para.h
 generic-y += ucontext.h
index 9976e767d51c2eca3803c1dd5f4210549ccf2dc1..e37da8c6837be5782ea968cbf96f379d88c905f9 100644 (file)
@@ -120,8 +120,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
        }
 
        walk_stackframe(&frame, save_trace, &data);
-       if (trace->nr_entries < trace->max_entries)
-               trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 
 void save_stack_trace(struct stack_trace *trace)
index c1f9b3cf437c3aa9018ff3288282e575f870424c..90e2640ade75416c9b328c970bf63754f0ccc951 100644 (file)
@@ -74,6 +74,7 @@ config X86
        select ARCH_MIGHT_HAVE_ACPI_PDC         if ACPI
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_MIGHT_HAVE_PC_SERIO
+       select ARCH_STACKWALK
        select ARCH_SUPPORTS_ACPI
        select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_SUPPORTS_NUMA_BALANCING     if X86_64
@@ -183,7 +184,6 @@ config X86
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
        select HAVE_RCU_TABLE_FREE              if PARAVIRT
-       select HAVE_RCU_TABLE_INVALIDATE        if HAVE_RCU_TABLE_FREE
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_RELIABLE_STACKTRACE         if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
        select HAVE_FUNCTION_ARG_ACCESS_API
@@ -268,9 +268,6 @@ config ARCH_MAY_HAVE_PC_FDC
        def_bool y
        depends on ISA_DMA_API
 
-config RWSEM_XCHGADD_ALGORITHM
-       def_bool y
-
 config GENERIC_CALIBRATE_DELAY
        def_bool y
 
@@ -783,14 +780,6 @@ config PARAVIRT_SPINLOCKS
 
          If you are unsure how to answer this question, answer Y.
 
-config QUEUED_LOCK_STAT
-       bool "Paravirt queued spinlock statistics"
-       depends on PARAVIRT_SPINLOCKS && DEBUG_FS
-       ---help---
-         Enable the collection of statistical data on the slowpath
-         behavior of paravirtualized queued spinlocks and report
-         them on debugfs.
-
 source "arch/x86/xen/Kconfig"
 
 config KVM_GUEST
@@ -1499,7 +1488,7 @@ config X86_CPA_STATISTICS
        depends on DEBUG_FS
        ---help---
          Expose statistics about the Change Page Attribute mechanism, which
-         helps to determine the effectivness of preserving large and huge
+         helps to determine the effectiveness of preserving large and huge
          page mappings when mapping protections are changed.
 
 config ARCH_HAS_MEM_ENCRYPT
@@ -2217,14 +2206,8 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
           If unsure, leave at the default value.
 
 config HOTPLUG_CPU
-       bool "Support for hot-pluggable CPUs"
+       def_bool y
        depends on SMP
-       ---help---
-         Say Y here to allow turning CPUs off and on. CPUs can be
-         controlled through /sys/devices/system/cpu.
-         ( Note: power management support will enable this option
-           automatically on SMP systems. )
-         Say N if you want to disable CPU hotplug.
 
 config BOOTPARAM_HOTPLUG_CPU0
        bool "Set default setting of cpu0_hotpluggable"
index 2d8b9d8ca4f8753291bb1487fb0b77d7b6009280..a587805c6687f6721ae8140da8144701c9abb49b 100644 (file)
@@ -219,8 +219,12 @@ ifdef CONFIG_RETPOLINE
   # Additionally, avoid generating expensive indirect jumps which
   # are subject to retpolines for small number of switch cases.
   # clang turns off jump table generation by default when under
-  # retpoline builds, however, gcc does not for x86.
-  KBUILD_CFLAGS += $(call cc-option,--param=case-values-threshold=20)
+  # retpoline builds, however, gcc does not for x86. This has
+  # only been fixed starting from gcc stable version 8.4.0 and
+  # onwards, but not for older ones. See gcc bug #86952.
+  ifndef CONFIG_CC_IS_CLANG
+    KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables)
+  endif
 endif
 
 archscripts: scripts_basic
index c0d6c560df69e0e63941a34539660770304ff612..5a237e8dbf8d563504a6cfcb4a67a2b7350bed0a 100644 (file)
@@ -352,7 +352,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
        boot_params->hdr.loadflags &= ~KASLR_FLAG;
 
        /* Save RSDP address for later use. */
-       boot_params->acpi_rsdp_addr = get_rsdp_addr();
+       /* boot_params->acpi_rsdp_addr = get_rsdp_addr(); */
 
        sanitize_boot_params(boot_params);
 
index fd13655e0f9b016baba58a3ed41832284d866fe3..d2f184165934c95a403faf489ec3b652850e74ff 100644 (file)
@@ -120,8 +120,6 @@ static inline void console_init(void)
 
 void set_sev_encryption_mask(void);
 
-#endif
-
 /* acpi.c */
 #ifdef CONFIG_ACPI
 acpi_physical_address get_rsdp_addr(void);
@@ -135,3 +133,5 @@ int count_immovable_mem_regions(void);
 #else
 static inline int count_immovable_mem_regions(void) { return 0; }
 #endif
+
+#endif /* BOOT_COMPRESSED_MISC_H */
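The hunk above is purely a guard-scoping fix: the stray #endif closed the
header's include guard before the ACPI prototypes, so they leaked outside it.
A minimal illustration of the corrected layout (declarations hypothetical):

#ifndef EXAMPLE_MISC_H
#define EXAMPLE_MISC_H

void console_init(void);

#ifdef CONFIG_ACPI
unsigned long get_rsdp_addr(void);
#else
static inline unsigned long get_rsdp_addr(void) { return 0; }
#endif

#endif /* EXAMPLE_MISC_H -- everything above stays under the guard */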
index 315a67b8896b9588c78b6128014f4702cdf965af..90154df8f12504e501a2c2b287e4fabba955f033 100644 (file)
@@ -13,8 +13,9 @@
  */
 
 #include <linux/types.h>
-#include <linux/kernel.h>
+#include <linux/compiler.h>
 #include <linux/errno.h>
+#include <linux/limits.h>
 #include <asm/asm.h>
 #include "ctype.h"
 #include "string.h"
index 3b6e70d085da89775317c8e2a560625ab4799e01..8457cdd47f751167a2321ebf063eb18bdb4ef8aa 100644 (file)
@@ -323,6 +323,12 @@ ENTRY(poly1305_4block_avx2)
        vpaddq          t2,t1,t1
        vmovq           t1x,d4
 
+       # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
+       # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
+       # amount.  Careful: we must not assume the carry bits 'd0 >> 26',
+       # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
+       # integers.  It's true in a single-block implementation, but not here.
+
        # d1 += d0 >> 26
        mov             d0,%rax
        shr             $26,%rax
@@ -361,16 +367,16 @@ ENTRY(poly1305_4block_avx2)
        # h0 += (d4 >> 26) * 5
        mov             d4,%rax
        shr             $26,%rax
-       lea             (%eax,%eax,4),%eax
-       add             %eax,%ebx
+       lea             (%rax,%rax,4),%rax
+       add             %rax,%rbx
        # h4 = d4 & 0x3ffffff
        mov             d4,%rax
        and             $0x3ffffff,%eax
        mov             %eax,h4
 
        # h1 += h0 >> 26
-       mov             %ebx,%eax
-       shr             $26,%eax
+       mov             %rbx,%rax
+       shr             $26,%rax
        add             %eax,h1
        # h0 = h0 & 0x3ffffff
        andl            $0x3ffffff,%ebx
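The widening fix above (and the identical one in the SSE2 version below)
matters because in the multi-block code the carry (d4 >> 26) * 5 can exceed
32 bits, so performing the addition in %eax/%ebx silently truncated it. A
self-contained demonstration of the truncation (values chosen purely for
illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t d4 = (uint64_t)1 << 60;   /* a large accumulated limb */
	uint64_t carry = (d4 >> 26) * 5;   /* > 2^32: needs 64-bit math */

	uint32_t h0_32 = (uint32_t)carry;  /* what a 32-bit add would keep */
	uint64_t h0_64 = carry;            /* what the fixed code keeps */

	printf("32-bit: %u\n64-bit: %llu\n",
	       h0_32, (unsigned long long)h0_64);
	return 0;
}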
index e6add74d78a595b63789d419100b7c30b024e0fc..6f0be7a869641c92c4993e378b53c07a7d385f29 100644 (file)
@@ -253,16 +253,16 @@ ENTRY(poly1305_block_sse2)
        # h0 += (d4 >> 26) * 5
        mov             d4,%rax
        shr             $26,%rax
-       lea             (%eax,%eax,4),%eax
-       add             %eax,%ebx
+       lea             (%rax,%rax,4),%rax
+       add             %rax,%rbx
        # h4 = d4 & 0x3ffffff
        mov             d4,%rax
        and             $0x3ffffff,%eax
        mov             %eax,h4
 
        # h1 += h0 >> 26
-       mov             %ebx,%eax
-       shr             $26,%eax
+       mov             %rbx,%rax
+       shr             $26,%rax
        add             %eax,h1
        # h0 = h0 & 0x3ffffff
        andl            $0x3ffffff,%ebx
@@ -524,6 +524,12 @@ ENTRY(poly1305_2block_sse2)
        paddq           t2,t1
        movq            t1,d4
 
+       # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
+       # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
+       # amount.  Careful: we must not assume the carry bits 'd0 >> 26',
+       # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
+       # integers.  It's true in a single-block implementation, but not here.
+
        # d1 += d0 >> 26
        mov             d0,%rax
        shr             $26,%rax
@@ -562,16 +568,16 @@ ENTRY(poly1305_2block_sse2)
        # h0 += (d4 >> 26) * 5
        mov             d4,%rax
        shr             $26,%rax
-       lea             (%eax,%eax,4),%eax
-       add             %eax,%ebx
+       lea             (%rax,%rax,4),%rax
+       add             %rax,%rbx
        # h4 = d4 & 0x3ffffff
        mov             d4,%rax
        and             $0x3ffffff,%eax
        mov             %eax,h4
 
        # h1 += h0 >> 26
-       mov             %ebx,%eax
-       shr             $26,%eax
+       mov             %rbx,%rax
+       shr             $26,%rax
        add             %eax,h1
        # h0 = h0 & 0x3ffffff
        andl            $0x3ffffff,%ebx
index d309f30cf7af84e67ac38910eff4256da9c25a11..5fc76b755510a3e3387c14b603685477e81b5405 100644 (file)
@@ -650,6 +650,7 @@ ENTRY(__switch_to_asm)
        pushl   %ebx
        pushl   %edi
        pushl   %esi
+       pushfl
 
        /* switch stack */
        movl    %esp, TASK_threadsp(%eax)
@@ -672,6 +673,7 @@ ENTRY(__switch_to_asm)
 #endif
 
        /* restore callee-saved registers */
+       popfl
        popl    %esi
        popl    %edi
        popl    %ebx
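With pushfl/popfl added around the stack switch, EFLAGS becomes part of the
callee-saved state of __switch_to_asm, so a flag set in one task (for
instance AC during a user-access region) cannot leak into the task being
switched in. A user-space x86 sketch of the same save/restore pairing
(illustrative only, gcc/clang inline asm):

#include <stdio.h>

static unsigned long save_and_restore_flags(void)
{
	unsigned long flags;

	/* Save EFLAGS/RFLAGS into a variable... */
	__asm__ volatile("pushf\n\tpop %0" : "=r"(flags));
	/* ...and restore it later, as the switch path now does. */
	__asm__ volatile("push %0\n\tpopf" : : "r"(flags) : "cc", "memory");
	return flags;
}

int main(void)
{
	printf("flags = %#lx\n", save_and_restore_flags());
	return 0;
}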
index 007b3fe9d727cbc8c55f78c1734b3d28551a7dda..98c7d12b945c28380679980deab66c5633633405 100644 (file)
@@ -29,12 +29,12 @@ extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
 extern time_t __vdso_time(time_t *t);
 
 #ifdef CONFIG_PARAVIRT_CLOCK
-extern u8 pvclock_page
+extern u8 pvclock_page[PAGE_SIZE]
        __attribute__((visibility("hidden")));
 #endif
 
 #ifdef CONFIG_HYPERV_TSCPAGE
-extern u8 hvclock_page
+extern u8 hvclock_page[PAGE_SIZE]
        __attribute__((visibility("hidden")));
 #endif
 
index 7d2d7c801dba6abb226b630104d1f038242562cf..f15441b07dad8a94b914299e3d0ed1b5fc909677 100644 (file)
@@ -3,10 +3,14 @@
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/delay.h>
 #include <asm/apicdef.h>
+#include <asm/nmi.h>
 
 #include "../perf_event.h"
 
+static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
+
 static __initconst const u64 amd_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
@@ -112,23 +116,144 @@ static __initconst const u64 amd_hw_cache_event_ids
  },
 };
 
+static __initconst const u64 amd_hw_cache_event_ids_f17h
+                               [PERF_COUNT_HW_CACHE_MAX]
+                               [PERF_COUNT_HW_CACHE_OP_MAX]
+                               [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
+               [C(RESULT_MISS)]   = 0xc860, /* L2$ access from DC Miss */
+       },
+       [C(OP_WRITE)] = {
+               [C(RESULT_ACCESS)] = 0,
+               [C(RESULT_MISS)]   = 0,
+       },
+       [C(OP_PREFETCH)] = {
+               [C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
+               [C(RESULT_MISS)]   = 0,
+       },
+},
+[C(L1I)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches  */
+               [C(RESULT_MISS)]   = 0x0081, /* Instruction cache misses   */
+       },
+       [C(OP_WRITE)] = {
+               [C(RESULT_ACCESS)] = -1,
+               [C(RESULT_MISS)]   = -1,
+       },
+       [C(OP_PREFETCH)] = {
+               [C(RESULT_ACCESS)] = 0,
+               [C(RESULT_MISS)]   = 0,
+       },
+},
+[C(LL)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = 0,
+               [C(RESULT_MISS)]   = 0,
+       },
+       [C(OP_WRITE)] = {
+               [C(RESULT_ACCESS)] = 0,
+               [C(RESULT_MISS)]   = 0,
+       },
+       [C(OP_PREFETCH)] = {
+               [C(RESULT_ACCESS)] = 0,
+               [C(RESULT_MISS)]   = 0,
+       },
+},
+[C(DTLB)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
+               [C(RESULT_MISS)]   = 0xf045, /* L2 DTLB misses (PT walks) */
+       },
+       [C(OP_WRITE)] = {
+               [C(RESULT_ACCESS)] = 0,
+               [C(RESULT_MISS)]   = 0,
+       },
+       [C(OP_PREFETCH)] = {
+               [C(RESULT_ACCESS)] = 0,
+               [C(RESULT_MISS)]   = 0,
+       },
+},
+[C(ITLB)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
+               [C(RESULT_MISS)]   = 0xff85, /* L1 ITLB misses, L2 misses */
+       },
+       [C(OP_WRITE)] = {
+               [C(RESULT_ACCESS)] = -1,
+               [C(RESULT_MISS)]   = -1,
+       },
+       [C(OP_PREFETCH)] = {
+               [C(RESULT_ACCESS)] = -1,
+               [C(RESULT_MISS)]   = -1,
+       },
+},
+[C(BPU)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr.      */
+               [C(RESULT_MISS)]   = 0x00c3, /* Retired Mispredicted BI    */
+       },
+       [C(OP_WRITE)] = {
+               [C(RESULT_ACCESS)] = -1,
+               [C(RESULT_MISS)]   = -1,
+       },
+       [C(OP_PREFETCH)] = {
+               [C(RESULT_ACCESS)] = -1,
+               [C(RESULT_MISS)]   = -1,
+       },
+},
+[C(NODE)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = 0,
+               [C(RESULT_MISS)]   = 0,
+       },
+       [C(OP_WRITE)] = {
+               [C(RESULT_ACCESS)] = -1,
+               [C(RESULT_MISS)]   = -1,
+       },
+       [C(OP_PREFETCH)] = {
+               [C(RESULT_ACCESS)] = -1,
+               [C(RESULT_MISS)]   = -1,
+       },
+},
+};
+
 /*
- * AMD Performance Monitor K7 and later.
+ * AMD Performance Monitor K7 and later, up to and including Family 16h:
  */
 static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
 {
-  [PERF_COUNT_HW_CPU_CYCLES]                   = 0x0076,
-  [PERF_COUNT_HW_INSTRUCTIONS]                 = 0x00c0,
-  [PERF_COUNT_HW_CACHE_REFERENCES]             = 0x077d,
-  [PERF_COUNT_HW_CACHE_MISSES]                 = 0x077e,
-  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]          = 0x00c2,
-  [PERF_COUNT_HW_BRANCH_MISSES]                        = 0x00c3,
-  [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]      = 0x00d0, /* "Decoder empty" event */
-  [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]       = 0x00d1, /* "Dispatch stalls" event */
+       [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x077d,
+       [PERF_COUNT_HW_CACHE_MISSES]            = 0x077e,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
+       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = 0x00d1, /* "Dispatch stalls" event */
+};
+
+/*
+ * AMD Performance Monitor Family 17h and later:
+ */
+static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
+{
+       [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = 0xff60,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
+       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = 0x0187,
 };
 
 static u64 amd_pmu_event_map(int hw_event)
 {
+       if (boot_cpu_data.x86 >= 0x17)
+               return amd_f17h_perfmon_event_map[hw_event];
+
        return amd_perfmon_event_map[hw_event];
 }
 
@@ -429,6 +554,132 @@ static void amd_pmu_cpu_dead(int cpu)
        }
 }
 
+/*
+ * When a PMC counter overflows, an NMI is used to process the event and
+ * reset the counter. NMI latency can result in the counter being updated
+ * before the NMI can run, producing what appear to be spurious
+ * NMIs. This function is intended to wait for the NMI to run and reset
+ * the counter to avoid possible unhandled NMI messages.
+ */
+#define OVERFLOW_WAIT_COUNT    50
+
+static void amd_pmu_wait_on_overflow(int idx)
+{
+       unsigned int i;
+       u64 counter;
+
+       /*
+        * Wait for the counter to be reset if it has overflowed. This loop
+        * should exit very, very quickly, but just in case, don't wait
+        * forever...
+        */
+       for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
+               rdmsrl(x86_pmu_event_addr(idx), counter);
+               if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
+                       break;
+
+               /* Might be in IRQ context, so can't sleep */
+               udelay(1);
+       }
+}
+
+static void amd_pmu_disable_all(void)
+{
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       int idx;
+
+       x86_pmu_disable_all();
+
+       /*
+        * This shouldn't be called from NMI context, but add a safeguard here
+        * to return, since if we're in NMI context we can't wait for an NMI
+        * to reset an overflowed counter value.
+        */
+       if (in_nmi())
+               return;
+
+       /*
+        * Check each counter for overflow and wait for it to be reset by the
+        * NMI if it has overflowed. This relies on the fact that all active
+        * counters are always enabled when this function is called and
+        * ARCH_PERFMON_EVENTSEL_INT is always set.
+        */
+       for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+               if (!test_bit(idx, cpuc->active_mask))
+                       continue;
+
+               amd_pmu_wait_on_overflow(idx);
+       }
+}
+
+static void amd_pmu_disable_event(struct perf_event *event)
+{
+       x86_pmu_disable_event(event);
+
+       /*
+        * This can be called from NMI context (via x86_pmu_stop). The counter
+        * may have overflowed, but either way, we'll never see it get reset
+        * by the NMI if we're already in the NMI. And the NMI latency support
+        * below will take care of any pending NMI that might have been
+        * generated by the overflow.
+        */
+       if (in_nmi())
+               return;
+
+       amd_pmu_wait_on_overflow(event->hw.idx);
+}
+
+/*
+ * Because of NMI latency, if multiple PMC counters are active or other sources
+ * of NMIs are received, the perf NMI handler can handle one or more overflowed
+ * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
+ * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
+ * back-to-back NMI support won't be active. This PMC handler needs to take into
+ * account that this can occur; otherwise, it could result in unknown NMI
+ * messages being issued. Examples of this are PMC overflow while in the NMI
+ * handler when multiple PMCs are active, or PMC overflow while handling some
+ * other source of an NMI.
+ *
+ * Attempt to mitigate this by using the number of active PMCs to determine
+ * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
+ * any PMCs. The per-CPU perf_nmi_counter variable is set to the lesser of the
+ * number of active PMCs and 2. The value of 2 is used in case an NMI does not
+ * arrive at the LAPIC in time to be collapsed into an already pending NMI.
+ */
+static int amd_pmu_handle_irq(struct pt_regs *regs)
+{
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       int active, handled;
+
+       /*
+        * Obtain the active count before calling x86_pmu_handle_irq() since
+        * it is possible that x86_pmu_handle_irq() may make a counter
+        * inactive (through x86_pmu_stop).
+        */
+       active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
+
+       /* Process any counter overflows */
+       handled = x86_pmu_handle_irq(regs);
+
+       /*
+        * If a counter was handled, record the number of possible remaining
+        * NMIs that can occur.
+        */
+       if (handled) {
+               this_cpu_write(perf_nmi_counter,
+                              min_t(unsigned int, 2, active));
+
+               return handled;
+       }
+
+       if (!this_cpu_read(perf_nmi_counter))
+               return NMI_DONE;
+
+       this_cpu_dec(perf_nmi_counter);
+
+       return NMI_HANDLED;
+}
+
 static struct event_constraint *
 amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event)
@@ -621,11 +872,11 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config)
 
 static __initconst const struct x86_pmu amd_pmu = {
        .name                   = "AMD",
-       .handle_irq             = x86_pmu_handle_irq,
-       .disable_all            = x86_pmu_disable_all,
+       .handle_irq             = amd_pmu_handle_irq,
+       .disable_all            = amd_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
-       .disable                = x86_pmu_disable_event,
+       .disable                = amd_pmu_disable_event,
        .hw_config              = amd_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_K7_EVNTSEL0,
@@ -718,9 +969,10 @@ __init int amd_pmu_init(void)
                x86_pmu.amd_nb_constraints = 0;
        }
 
-       /* Events are common for all AMDs */
-       memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
-              sizeof(hw_cache_event_ids));
+       if (boot_cpu_data.x86 >= 0x17)
+               memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
+       else
+               memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));
 
        return 0;
 }
@@ -732,7 +984,7 @@ void amd_pmu_enable_virt(void)
        cpuc->perf_ctr_virt_mask = 0;
 
        /* Reload all events */
-       x86_pmu_disable_all();
+       amd_pmu_disable_all();
        x86_pmu_enable_all(0);
 }
 EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
@@ -750,7 +1002,7 @@ void amd_pmu_disable_virt(void)
        cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
 
        /* Reload all events */
-       x86_pmu_disable_all();
+       amd_pmu_disable_all();
        x86_pmu_enable_all(0);
 }
 EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
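The handler added above keeps a small per-CPU "claim budget" after each
genuinely handled overflow, so late-arriving PMC NMIs are absorbed instead of
being reported as unknown NMIs. A self-contained model of that bookkeeping
(names and the single-CPU framing are illustrative, not kernel API):

#include <stdio.h>

enum { NMI_DONE = 0, NMI_HANDLED = 1 };

static unsigned int nmi_budget;	/* models the per-CPU perf_nmi_counter */

static int handle_nmi(int handled_overflows, int active_counters)
{
	if (handled_overflows) {
		/* min(2, active): at most two stragglers can still arrive */
		nmi_budget = active_counters < 2 ? active_counters : 2;
		return NMI_HANDLED;
	}
	if (!nmi_budget)
		return NMI_DONE;	/* budget spent: genuinely not ours */
	nmi_budget--;
	return NMI_HANDLED;		/* late NMI from an already-reset PMC */
}

int main(void)
{
	printf("%d %d %d %d\n",
	       handle_nmi(1, 4),	/* overflow handled, budget = 2 */
	       handle_nmi(0, 4),	/* absorbed */
	       handle_nmi(0, 4),	/* absorbed */
	       handle_nmi(0, 4));	/* -> NMI_DONE */
	return 0;
}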
index e2b1447192a888ffafb2883ddbdfbbd37c1e9315..f315425d8468f473bf11e95c6039b59ac6b318a4 100644 (file)
@@ -560,6 +560,21 @@ int x86_pmu_hw_config(struct perf_event *event)
                        return -EINVAL;
        }
 
+       /* sample_regs_user never supports XMM registers */
+       if (unlikely(event->attr.sample_regs_user & PEBS_XMM_REGS))
+               return -EINVAL;
+       /*
+        * Besides the general purpose registers, XMM registers may
+        * be collected in PEBS on some platforms, e.g. Icelake
+        */
+       if (unlikely(event->attr.sample_regs_intr & PEBS_XMM_REGS)) {
+               if (x86_pmu.pebs_no_xmm_regs)
+                       return -EINVAL;
+
+               if (!event->attr.precise_ip)
+                       return -EINVAL;
+       }
+
        return x86_setup_perfctr(event);
 }
 
@@ -661,6 +676,10 @@ static inline int is_x86_event(struct perf_event *event)
        return event->pmu == &pmu;
 }
 
+struct pmu *x86_get_pmu(void)
+{
+       return &pmu;
+}
 /*
  * Event scheduler state:
  *
@@ -849,18 +868,43 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
        struct event_constraint *c;
        unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        struct perf_event *e;
-       int i, wmin, wmax, unsched = 0;
+       int n0, i, wmin, wmax, unsched = 0;
        struct hw_perf_event *hwc;
 
        bitmap_zero(used_mask, X86_PMC_IDX_MAX);
 
+       /*
+        * Compute the number of events already present; see x86_pmu_add(),
+        * validate_group() and x86_pmu_commit_txn(). For the former two
+        * cpuc->n_events hasn't been updated yet, while for the latter
+        * cpuc->n_txn contains the number of events added in the current
+        * transaction.
+        */
+       n0 = cpuc->n_events;
+       if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
+               n0 -= cpuc->n_txn;
+
        if (x86_pmu.start_scheduling)
                x86_pmu.start_scheduling(cpuc);
 
        for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
-               cpuc->event_constraint[i] = NULL;
-               c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
-               cpuc->event_constraint[i] = c;
+               c = cpuc->event_constraint[i];
+
+               /*
+                * Previously scheduled events should have a cached constraint,
+                * while new events should not have one.
+                */
+               WARN_ON_ONCE((c && i >= n0) || (!c && i < n0));
+
+               /*
+                * Request constraints for new events; or for those events that
+                * have a dynamic constraint -- for those the constraint can
+                * change due to external factors (sibling state, allow_tfa).
+                */
+               if (!c || (c->flags & PERF_X86_EVENT_DYNAMIC)) {
+                       c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
+                       cpuc->event_constraint[i] = c;
+               }
 
                wmin = min(wmin, c->weight);
                wmax = max(wmax, c->weight);
@@ -925,25 +969,20 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
        if (!unsched && assign) {
                for (i = 0; i < n; i++) {
                        e = cpuc->event_list[i];
-                       e->hw.flags |= PERF_X86_EVENT_COMMITTED;
                        if (x86_pmu.commit_scheduling)
                                x86_pmu.commit_scheduling(cpuc, i, assign[i]);
                }
        } else {
-               for (i = 0; i < n; i++) {
+               for (i = n0; i < n; i++) {
                        e = cpuc->event_list[i];
-                       /*
-                        * do not put_constraint() on comitted events,
-                        * because they are good to go
-                        */
-                       if ((e->hw.flags & PERF_X86_EVENT_COMMITTED))
-                               continue;
 
                        /*
                         * release events that failed scheduling
                         */
                        if (x86_pmu.put_event_constraints)
                                x86_pmu.put_event_constraints(cpuc, e);
+
+                       cpuc->event_constraint[i] = NULL;
                }
        }
 
@@ -1349,8 +1388,9 @@ void x86_pmu_stop(struct perf_event *event, int flags)
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
 
-       if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
+       if (test_bit(hwc->idx, cpuc->active_mask)) {
                x86_pmu.disable(event);
+               __clear_bit(hwc->idx, cpuc->active_mask);
                cpuc->events[hwc->idx] = NULL;
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
@@ -1371,11 +1411,6 @@ static void x86_pmu_del(struct perf_event *event, int flags)
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int i;
 
-       /*
-        * event is descheduled
-        */
-       event->hw.flags &= ~PERF_X86_EVENT_COMMITTED;
-
        /*
         * If we're called during a txn, we only need to undo x86_pmu.add.
         * The events never got scheduled and ->cancel_txn will truncate
@@ -1412,6 +1447,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
                cpuc->event_list[i-1] = cpuc->event_list[i];
                cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
        }
+       cpuc->event_constraint[i-1] = NULL;
        --cpuc->n_events;
 
        perf_event_update_userpage(event);
@@ -1447,16 +1483,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
        apic_write(APIC_LVTPC, APIC_DM_NMI);
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-               if (!test_bit(idx, cpuc->active_mask)) {
-                       /*
-                        * Though we deactivated the counter some cpus
-                        * might still deliver spurious interrupts still
-                        * in flight. Catch them:
-                        */
-                       if (__test_and_clear_bit(idx, cpuc->running))
-                               handled++;
+               if (!test_bit(idx, cpuc->active_mask))
                        continue;
-               }
 
                event = cpuc->events[idx];
 
@@ -2031,7 +2059,7 @@ static int validate_event(struct perf_event *event)
        if (IS_ERR(fake_cpuc))
                return PTR_ERR(fake_cpuc);
 
-       c = x86_pmu.get_event_constraints(fake_cpuc, -1, event);
+       c = x86_pmu.get_event_constraints(fake_cpuc, 0, event);
 
        if (!c || !c->weight)
                ret = -EINVAL;
@@ -2079,8 +2107,7 @@ static int validate_group(struct perf_event *event)
        if (n < 0)
                goto out;
 
-       fake_cpuc->n_events = n;
-
+       fake_cpuc->n_events = 0;
        ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
 
 out:
@@ -2355,6 +2382,15 @@ void arch_perf_update_userpage(struct perf_event *event,
        cyc2ns_read_end();
 }
 
+/*
+ * Determine whether the regs were taken from an irq/exception handler rather
+ * than from perf_arch_fetch_caller_regs().
+ */
+static bool perf_hw_regs(struct pt_regs *regs)
+{
+       return regs->flags & X86_EFLAGS_FIXED;
+}
+
 void
 perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
@@ -2366,11 +2402,15 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
                return;
        }
 
-       if (perf_callchain_store(entry, regs->ip))
-               return;
+       if (perf_hw_regs(regs)) {
+               if (perf_callchain_store(entry, regs->ip))
+                       return;
+               unwind_start(&state, current, regs, NULL);
+       } else {
+               unwind_start(&state, current, NULL, (void *)regs->sp);
+       }
 
-       for (unwind_start(&state, current, regs, NULL); !unwind_done(&state);
-            unwind_next_frame(&state)) {
+       for (; !unwind_done(&state); unwind_next_frame(&state)) {
                addr = unwind_get_return_address(&state);
                if (!addr || perf_callchain_store(entry, addr))
                        return;
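perf_hw_regs() above relies on an x86 invariant: real interrupt/exception
frames always have the reserved EFLAGS bit 1 (X86_EFLAGS_FIXED) set, while
pt_regs synthesized by perf_arch_fetch_caller_regs() leave the flags zeroed.
A self-contained model of the test (struct layout simplified):

#include <stdio.h>

#define X86_EFLAGS_FIXED (1UL << 1)	/* always 1 in hardware EFLAGS */

struct fake_regs { unsigned long flags; };

static int perf_hw_regs(const struct fake_regs *regs)
{
	return (regs->flags & X86_EFLAGS_FIXED) != 0;
}

int main(void)
{
	struct fake_regs hw = { .flags = X86_EFLAGS_FIXED | 0x200 };
	struct fake_regs synthetic = { .flags = 0 };

	printf("hw=%d synthetic=%d\n",
	       perf_hw_regs(&hw), perf_hw_regs(&synthetic));
	return 0;
}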
index 8baa441d8000f6c4efbde5afe72d4e5a518d2184..ef763f535e3abbd034857ad48a678c1281a358c4 100644 (file)
@@ -239,6 +239,35 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
        EVENT_EXTRA_END
 };
 
+static struct event_constraint intel_icl_event_constraints[] = {
+       FIXED_EVENT_CONSTRAINT(0x00c0, 0),      /* INST_RETIRED.ANY */
+       INTEL_UEVENT_CONSTRAINT(0x1c0, 0),      /* INST_RETIRED.PREC_DIST */
+       FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
+       FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
+       FIXED_EVENT_CONSTRAINT(0x0400, 3),      /* SLOTS */
+       INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
+       INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
+       INTEL_EVENT_CONSTRAINT(0x32, 0xf),      /* SW_PREFETCH_ACCESS.* */
+       INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf),
+       INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
+       INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff),  /* CYCLE_ACTIVITY.STALLS_TOTAL */
+       INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff),  /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
+       INTEL_EVENT_CONSTRAINT(0xa3, 0xf),      /* CYCLE_ACTIVITY.* */
+       INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
+       INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
+       INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
+       INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
+       EVENT_CONSTRAINT_END
+};
+
+static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
+       INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff9fffull, RSP_0),
+       INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff9fffull, RSP_1),
+       INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+       INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
+       EVENT_EXTRA_END
+};
+
 EVENT_ATTR_STR(mem-loads,      mem_ld_nhm,     "event=0x0b,umask=0x10,ldlat=3");
 EVENT_ATTR_STR(mem-loads,      mem_ld_snb,     "event=0xcd,umask=0x1,ldlat=3");
 EVENT_ATTR_STR(mem-stores,     mem_st_snb,     "event=0xcd,umask=0x2");
@@ -1827,6 +1856,45 @@ static __initconst const u64 glp_hw_cache_extra_regs
        },
 };
 
+#define TNT_LOCAL_DRAM                 BIT_ULL(26)
+#define TNT_DEMAND_READ                        GLM_DEMAND_DATA_RD
+#define TNT_DEMAND_WRITE               GLM_DEMAND_RFO
+#define TNT_LLC_ACCESS                 GLM_ANY_RESPONSE
+#define TNT_SNP_ANY                    (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
+                                        SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
+#define TNT_LLC_MISS                   (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
+
+static __initconst const u64 tnt_hw_cache_extra_regs
+                               [PERF_COUNT_HW_CACHE_MAX]
+                               [PERF_COUNT_HW_CACHE_OP_MAX]
+                               [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+       [C(LL)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = TNT_DEMAND_READ|
+                                                 TNT_LLC_ACCESS,
+                       [C(RESULT_MISS)]        = TNT_DEMAND_READ|
+                                                 TNT_LLC_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = TNT_DEMAND_WRITE|
+                                                 TNT_LLC_ACCESS,
+                       [C(RESULT_MISS)]        = TNT_DEMAND_WRITE|
+                                                 TNT_LLC_MISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = 0x0,
+                       [C(RESULT_MISS)]        = 0x0,
+               },
+       },
+};
+
+static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
+       /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
+       INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffffff9fffull, RSP_0),
+       INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xffffff9fffull, RSP_1),
+       EVENT_EXTRA_END
+};
+
 #define KNL_OT_L2_HITE         BIT_ULL(19) /* Other Tile L2 Hit */
 #define KNL_OT_L2_HITF         BIT_ULL(20) /* Other Tile L2 Hit */
 #define KNL_MCDRAM_LOCAL       BIT_ULL(21)
@@ -2015,7 +2083,7 @@ static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int
        /*
         * We're going to use PMC3, make sure TFA is set before we touch it.
         */
-       if (cntr == 3 && !cpuc->is_fake)
+       if (cntr == 3)
                intel_set_tfa(cpuc, true);
 }
 
@@ -2091,15 +2159,19 @@ static void intel_pmu_disable_event(struct perf_event *event)
        cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
        cpuc->intel_cp_status &= ~(1ull << hwc->idx);
 
-       if (unlikely(event->attr.precise_ip))
-               intel_pmu_pebs_disable(event);
-
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
                intel_pmu_disable_fixed(hwc);
                return;
        }
 
        x86_pmu_disable_event(event);
+
+       /*
+        * Needs to be called after x86_pmu_disable_event,
+        * so we don't trigger the event without the PEBS bit set.
+        */
+       if (unlikely(event->attr.precise_ip))
+               intel_pmu_pebs_disable(event);
 }
 
 static void intel_pmu_del_event(struct perf_event *event)
@@ -2145,6 +2217,11 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
        bits <<= (idx * 4);
        mask = 0xfULL << (idx * 4);
 
+       if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
+               bits |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
+               mask |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
+       }
+
        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        ctrl_val |= bits;
@@ -2688,7 +2765,7 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 
        if (x86_pmu.event_constraints) {
                for_each_event_constraint(c, x86_pmu.event_constraints) {
-                       if ((event->hw.config & c->cmask) == c->code) {
+                       if (constraint_match(c, event->hw.config)) {
                                event->hw.flags |= c->flags;
                                return c;
                        }
@@ -2838,7 +2915,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
        struct intel_excl_states *xlo;
        int tid = cpuc->excl_thread_id;
-       int is_excl, i;
+       int is_excl, i, w;
 
        /*
         * validating a group does not require
@@ -2894,36 +2971,40 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
         * SHARED   : sibling counter measuring non-exclusive event
         * UNUSED   : sibling counter unused
         */
+       w = c->weight;
        for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
                /*
                 * exclusive event in sibling counter
                 * our corresponding counter cannot be used
                 * regardless of our event
                 */
-               if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE)
+               if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
                        __clear_bit(i, c->idxmsk);
+                       w--;
+                       continue;
+               }
                /*
                 * if measuring an exclusive event, sibling
                 * measuring non-exclusive, then counter cannot
                 * be used
                 */
-               if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED)
+               if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
                        __clear_bit(i, c->idxmsk);
+                       w--;
+                       continue;
+               }
        }
 
-       /*
-        * recompute actual bit weight for scheduling algorithm
-        */
-       c->weight = hweight64(c->idxmsk64);
-
        /*
         * if we return an empty mask, then switch
         * back to static empty constraint to avoid
         * the cost of freeing later on
         */
-       if (c->weight == 0)
+       if (!w)
                c = &emptyconstraint;
 
+       c->weight = w;
+
        return c;
 }
 
@@ -2931,11 +3012,9 @@ static struct event_constraint *
 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                            struct perf_event *event)
 {
-       struct event_constraint *c1 = NULL;
-       struct event_constraint *c2;
+       struct event_constraint *c1, *c2;
 
-       if (idx >= 0) /* fake does < 0 */
-               c1 = cpuc->event_constraint[idx];
+       c1 = cpuc->event_constraint[idx];
 
        /*
         * first time only
@@ -2943,7 +3022,8 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
         * - dynamic constraint: handled by intel_get_excl_constraints()
         */
        c2 = __intel_get_event_constraints(cpuc, idx, event);
-       if (c1 && (c1->flags & PERF_X86_EVENT_DYNAMIC)) {
+       if (c1) {
+               WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
                bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
                c1->weight = c2->weight;
                c2 = c1;
@@ -3131,7 +3211,7 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
                flags &= ~PERF_SAMPLE_TIME;
        if (!event->attr.exclude_kernel)
                flags &= ~PERF_SAMPLE_REGS_USER;
-       if (event->attr.sample_regs_user & ~PEBS_REGS)
+       if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
                flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
        return flags;
 }
@@ -3185,7 +3265,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
                return ret;
 
        if (event->attr.precise_ip) {
-               if (!event->attr.freq) {
+               if (!(event->attr.freq || event->attr.wakeup_events)) {
                        event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
                        if (!(event->attr.sample_type &
                              ~intel_pmu_large_pebs_flags(event)))
@@ -3366,6 +3446,12 @@ static struct event_constraint counter0_constraint =
 static struct event_constraint counter2_constraint =
                        EVENT_CONSTRAINT(0, 0x4, 0);
 
+static struct event_constraint fixed0_constraint =
+                       FIXED_EVENT_CONSTRAINT(0x00c0, 0);
+
+static struct event_constraint fixed0_counter0_constraint =
+                       INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
+
 static struct event_constraint *
 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event)
@@ -3384,6 +3470,21 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
        return c;
 }
 
+static struct event_constraint *
+icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+                         struct perf_event *event)
+{
+       /*
+        * Fixed counter 0 has less skid.
+        * Force instruction:ppp in Fixed counter 0
+        */
+       if ((event->attr.precise_ip == 3) &&
+           constraint_match(&fixed0_constraint, event->hw.config))
+               return &fixed0_constraint;
+
+       return hsw_get_event_constraints(cpuc, idx, event);
+}
+
 static struct event_constraint *
 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event)
@@ -3399,6 +3500,29 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
        return c;
 }
 
+static struct event_constraint *
+tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+                         struct perf_event *event)
+{
+       struct event_constraint *c;
+
+       /*
+        * :ppp means to do reduced skid PEBS,
+        * which is available on PMC0 and fixed counter 0.
+        */
+       if (event->attr.precise_ip == 3) {
+               /* Force instruction:ppp on PMC0 and Fixed counter 0 */
+               if (constraint_match(&fixed0_constraint, event->hw.config))
+                       return &fixed0_counter0_constraint;
+
+               return &counter0_constraint;
+       }
+
+       c = intel_get_event_constraints(cpuc, idx, event);
+
+       return c;
+}
+
 static bool allow_tsx_force_abort = true;
 
 static struct event_constraint *
@@ -3410,7 +3534,7 @@ tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
        /*
         * Without TFA we must not use PMC3.
         */
-       if (!allow_tsx_force_abort && test_bit(3, c->idxmsk) && idx >= 0) {
+       if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
                c = dyn_constraint(cpuc, c, idx);
                c->idxmsk64 &= ~(1ULL << 3);
                c->weight--;
@@ -3507,6 +3631,8 @@ static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
 
 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
 {
+       cpuc->pebs_record_size = x86_pmu.pebs_record_size;
+
        if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
                cpuc->shared_regs = allocate_shared_regs(cpu);
                if (!cpuc->shared_regs)
@@ -3575,6 +3701,12 @@ static void intel_pmu_cpu_starting(int cpu)
 
        cpuc->lbr_sel = NULL;
 
+       if (x86_pmu.flags & PMU_FL_TFA) {
+               WARN_ON_ONCE(cpuc->tfa_shadow);
+               cpuc->tfa_shadow = ~0ULL;
+               intel_set_tfa(cpuc, false);
+       }
+
        if (x86_pmu.version > 1)
                flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
 
@@ -4108,6 +4240,42 @@ static struct attribute *hsw_tsx_events_attrs[] = {
        NULL
 };
 
+EVENT_ATTR_STR(tx-capacity-read,  tx_capacity_read,  "event=0x54,umask=0x80");
+EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
+EVENT_ATTR_STR(el-capacity-read,  el_capacity_read,  "event=0x54,umask=0x80");
+EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
+
+static struct attribute *icl_events_attrs[] = {
+       EVENT_PTR(mem_ld_hsw),
+       EVENT_PTR(mem_st_hsw),
+       NULL,
+};
+
+static struct attribute *icl_tsx_events_attrs[] = {
+       EVENT_PTR(tx_start),
+       EVENT_PTR(tx_abort),
+       EVENT_PTR(tx_commit),
+       EVENT_PTR(tx_capacity_read),
+       EVENT_PTR(tx_capacity_write),
+       EVENT_PTR(tx_conflict),
+       EVENT_PTR(el_start),
+       EVENT_PTR(el_abort),
+       EVENT_PTR(el_commit),
+       EVENT_PTR(el_capacity_read),
+       EVENT_PTR(el_capacity_write),
+       EVENT_PTR(el_conflict),
+       EVENT_PTR(cycles_t),
+       EVENT_PTR(cycles_ct),
+       NULL,
+};
+
+static __init struct attribute **get_icl_events_attrs(void)
+{
+       return boot_cpu_has(X86_FEATURE_RTM) ?
+               merge_attr(icl_events_attrs, icl_tsx_events_attrs) :
+               icl_events_attrs;
+}
+
 static ssize_t freeze_on_smi_show(struct device *cdev,
                                  struct device_attribute *attr,
                                  char *buf)
@@ -4147,6 +4315,50 @@ done:
        return count;
 }
 
+static void update_tfa_sched(void *ignored)
+{
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+       /*
+        * Check if PMC3 is used and, if so, force a schedule-out for all
+        * event types in all contexts.
+        */
+       if (test_bit(3, cpuc->active_mask))
+               perf_pmu_resched(x86_get_pmu());
+}
+
+static ssize_t show_sysctl_tfa(struct device *cdev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       return snprintf(buf, 40, "%d\n", allow_tsx_force_abort);
+}
+
+static ssize_t set_sysctl_tfa(struct device *cdev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t count)
+{
+       bool val;
+       ssize_t ret;
+
+       ret = kstrtobool(buf, &val);
+       if (ret)
+               return ret;
+
+       /* no change */
+       if (val == allow_tsx_force_abort)
+               return count;
+
+       allow_tsx_force_abort = val;
+
+       get_online_cpus();
+       on_each_cpu(update_tfa_sched, NULL, 1);
+       put_online_cpus();
+
+       return count;
+}
+
+
 static DEVICE_ATTR_RW(freeze_on_smi);
 
 static ssize_t branches_show(struct device *cdev,
@@ -4179,7 +4391,9 @@ static struct attribute *intel_pmu_caps_attrs[] = {
        NULL
 };
 
-static DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
+static DEVICE_ATTR(allow_tsx_force_abort, 0644,
+                  show_sysctl_tfa,
+                  set_sysctl_tfa);
 
 static struct attribute *intel_pmu_attrs[] = {
        &dev_attr_freeze_on_smi.attr,
@@ -4440,6 +4654,32 @@ __init int intel_pmu_init(void)
                name = "goldmont_plus";
                break;
 
+       case INTEL_FAM6_ATOM_TREMONT_X:
+               x86_pmu.late_ack = true;
+               memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
+                      sizeof(hw_cache_event_ids));
+               memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
+                      sizeof(hw_cache_extra_regs));
+               hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
+
+               intel_pmu_lbr_init_skl();
+
+               x86_pmu.event_constraints = intel_slm_event_constraints;
+               x86_pmu.extra_regs = intel_tnt_extra_regs;
+               /*
+                * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
+                * for precise cycles.
+                */
+               x86_pmu.pebs_aliases = NULL;
+               x86_pmu.pebs_prec_dist = true;
+               x86_pmu.lbr_pt_coexist = true;
+               x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+               x86_pmu.get_event_constraints = tnt_get_event_constraints;
+               extra_attr = slm_format_attr;
+               pr_cont("Tremont events, ");
+               name = "Tremont";
+               break;
+
        case INTEL_FAM6_WESTMERE:
        case INTEL_FAM6_WESTMERE_EP:
        case INTEL_FAM6_WESTMERE_EX:
@@ -4688,13 +4928,41 @@ __init int intel_pmu_init(void)
                        x86_pmu.get_event_constraints = tfa_get_event_constraints;
                        x86_pmu.enable_all = intel_tfa_pmu_enable_all;
                        x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
-                       intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
+                       intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr;
                }
 
                pr_cont("Skylake events, ");
                name = "skylake";
                break;
 
+       case INTEL_FAM6_ICELAKE_MOBILE:
+               x86_pmu.late_ack = true;
+               memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+               memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+               hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
+               intel_pmu_lbr_init_skl();
+
+               x86_pmu.event_constraints = intel_icl_event_constraints;
+               x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
+               x86_pmu.extra_regs = intel_icl_extra_regs;
+               x86_pmu.pebs_aliases = NULL;
+               x86_pmu.pebs_prec_dist = true;
+               x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+               x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
+
+               x86_pmu.hw_config = hsw_hw_config;
+               x86_pmu.get_event_constraints = icl_get_event_constraints;
+               extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+                       hsw_format_attr : nhm_format_attr;
+               extra_attr = merge_attr(extra_attr, skl_format_attr);
+               x86_pmu.cpu_events = get_icl_events_attrs();
+               x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xca, .umask=0x02);
+               x86_pmu.lbr_pt_coexist = true;
+               intel_pmu_pebs_data_source_skl(false);
+               pr_cont("Icelake events, ");
+               name = "icelake";
+               break;
+
        default:
                switch (x86_pmu.version) {
                case 1:
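Several hunks above switch event-constraint lookups from a masked equality
test to constraint_match(), which also has to accept the new
INTEL_EVENT_CONSTRAINT_RANGE() entries. A self-contained model of range-aware
matching (field layout simplified from the kernel's struct event_constraint):

#include <stdint.h>
#include <stdio.h>

struct constraint {
	uint64_t code;	/* first matching event code */
	uint64_t cmask;	/* bits of the config that participate */
	uint64_t size;	/* extra codes covered; 0 = exact match */
};

static int constraint_match(const struct constraint *c, uint64_t config)
{
	uint64_t ecode = config & c->cmask;

	return ecode >= c->code && ecode <= c->code + c->size;
}

int main(void)
{
	/* Models a range entry like INTEL_EVENT_CONSTRAINT_RANGE(0xd1, 0xd4, 0xf) */
	struct constraint range = { .code = 0xd1, .cmask = 0xff, .size = 0x3 };

	printf("%d %d %d\n",
	       constraint_match(&range, 0xd1),	/* in range  -> 1 */
	       constraint_match(&range, 0xd4),	/* in range  -> 1 */
	       constraint_match(&range, 0xd5));	/* outside   -> 0 */
	return 0;
}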
index 94a4b7fc75d0ecf344bade95be1cf563576250d2..6072f92cb8eaffbc141582ff56cc1c2ff840c37c 100644 (file)
  *                            Scope: Package (physical package)
  *     MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
  *                            perf code: 0x04
- *                            Available model: HSW ULT,CNL
+ *                            Available model: HSW ULT,KBL,CNL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
  *                            perf code: 0x05
- *                            Available model: HSW ULT,CNL
+ *                            Available model: HSW ULT,KBL,CNL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
  *                            perf code: 0x06
- *                            Available model: HSW ULT,GLM,CNL
+ *                            Available model: HSW ULT,KBL,GLM,CNL
  *                            Scope: Package (physical package)
  *
  */
@@ -566,8 +566,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
        X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates),
 
-       X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  snb_cstates),
-       X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  hswult_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, hswult_cstates),
 
        X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates),
 
@@ -578,6 +578,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
        X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates),
 
        X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
+
+       X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_MOBILE, snb_cstates),
        { },
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
index 10c99ce1feaddf5fa196bfbd385cbd02b55ef57a..7a9f5dac5abe4a5f391d7cb3cc18afb5a23ec77b 100644 (file)
@@ -849,6 +849,26 @@ struct event_constraint intel_skl_pebs_event_constraints[] = {
        EVENT_CONSTRAINT_END
 };
 
+struct event_constraint intel_icl_pebs_event_constraints[] = {
+       INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL),   /* INST_RETIRED.PREC_DIST */
+       INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x400000000ULL),  /* SLOTS */
+
+       INTEL_PLD_CONSTRAINT(0x1cd, 0xff),                      /* MEM_TRANS_RETIRED.LOAD_LATENCY */
+       INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf),    /* MEM_INST_RETIRED.LOAD */
+       INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf),    /* MEM_INST_RETIRED.STORE */
+
+       INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */
+
+       INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf),                /* MEM_INST_RETIRED.* */
+
+       /*
+        * Everything else is handled by PMU_FL_PEBS_ALL, because we
+        * need the full constraints from the main table.
+        */
+
+       EVENT_CONSTRAINT_END
+};
+
 struct event_constraint *intel_pebs_constraints(struct perf_event *event)
 {
        struct event_constraint *c;
@@ -858,7 +878,7 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
 
        if (x86_pmu.pebs_constraints) {
                for_each_event_constraint(c, x86_pmu.pebs_constraints) {
-                       if ((event->hw.config & c->cmask) == c->code) {
+                       if (constraint_match(c, event->hw.config)) {
                                event->hw.flags |= c->flags;
                                return c;
                        }
@@ -906,17 +926,87 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
 
        if (cpuc->n_pebs == cpuc->n_large_pebs) {
                threshold = ds->pebs_absolute_maximum -
-                       reserved * x86_pmu.pebs_record_size;
+                       reserved * cpuc->pebs_record_size;
        } else {
-               threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size;
+               threshold = ds->pebs_buffer_base + cpuc->pebs_record_size;
        }
 
        ds->pebs_interrupt_threshold = threshold;
 }
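A stand-alone illustration of the threshold arithmetic above: when every PEBS event on the CPU can use large PEBS, the interrupt threshold is pushed near the end of the buffer so many records accumulate per PMI; otherwise a PMI is taken after every record. This is user-space C with invented buffer numbers, not kernel code:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* hypothetical debug-store buffer: 64 KiB of 48-byte records */
        uint64_t pebs_buffer_base      = 0x1000;
        uint64_t pebs_absolute_maximum = pebs_buffer_base + 64 * 1024;
        int      pebs_record_size      = 48;
        int      reserved              = 1;     /* slack records kept free */
        int      n_pebs = 4, n_large_pebs = 4;  /* all events use large PEBS */
        uint64_t threshold;

        if (n_pebs == n_large_pebs)             /* fill the buffer, PMI late */
                threshold = pebs_absolute_maximum - reserved * pebs_record_size;
        else                                    /* PMI after every record */
                threshold = pebs_buffer_base + pebs_record_size;

        printf("interrupt threshold = %#llx\n", (unsigned long long)threshold);
        return 0;
}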
 
+static void adaptive_pebs_record_size_update(void)
+{
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       u64 pebs_data_cfg = cpuc->pebs_data_cfg;
+       int sz = sizeof(struct pebs_basic);
+
+       if (pebs_data_cfg & PEBS_DATACFG_MEMINFO)
+               sz += sizeof(struct pebs_meminfo);
+       if (pebs_data_cfg & PEBS_DATACFG_GP)
+               sz += sizeof(struct pebs_gprs);
+       if (pebs_data_cfg & PEBS_DATACFG_XMMS)
+               sz += sizeof(struct pebs_xmm);
+       if (pebs_data_cfg & PEBS_DATACFG_LBRS)
+               sz += x86_pmu.lbr_nr * sizeof(struct pebs_lbr_entry);
+
+       cpuc->pebs_record_size = sz;
+}
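A minimal user-space sketch of the size accumulation above. The PEBS_DATACFG_* bit positions and the group layouts (pebs_basic, pebs_meminfo, pebs_gprs, pebs_xmm, pebs_lbr_entry) are assumptions that mirror the kernel structures, which are not shown in this diff:

#include <stdio.h>
#include <stdint.h>

#define PEBS_DATACFG_MEMINFO    (1ULL << 0)     /* assumed bit positions */
#define PEBS_DATACFG_GP         (1ULL << 1)
#define PEBS_DATACFG_XMMS       (1ULL << 2)
#define PEBS_DATACFG_LBRS       (1ULL << 3)

struct pebs_basic     { uint64_t format_size, ip, applicable_counters, tsc; };
struct pebs_meminfo   { uint64_t address, aux, latency, tsx_tuning; };
struct pebs_gprs      { uint64_t flags, ip, ax, cx, dx, bx, sp, bp, si, di,
                                 r8, r9, r10, r11, r12, r13, r14, r15; };
struct pebs_xmm       { uint64_t xmm[16 * 2]; };  /* 16 regs x 128 bit */
struct pebs_lbr_entry { uint64_t from, to, info; };

static int record_size(uint64_t cfg, int lbr_nr)
{
        int sz = sizeof(struct pebs_basic);       /* always present */

        if (cfg & PEBS_DATACFG_MEMINFO)
                sz += sizeof(struct pebs_meminfo);
        if (cfg & PEBS_DATACFG_GP)
                sz += sizeof(struct pebs_gprs);
        if (cfg & PEBS_DATACFG_XMMS)
                sz += sizeof(struct pebs_xmm);
        if (cfg & PEBS_DATACFG_LBRS)
                sz += lbr_nr * sizeof(struct pebs_lbr_entry);
        return sz;
}

int main(void)
{
        /* e.g. basic group + GPRs + 32 LBR entries */
        printf("%d bytes\n", record_size(PEBS_DATACFG_GP | PEBS_DATACFG_LBRS, 32));
        return 0;
}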
+
+#define PERF_PEBS_MEMINFO_TYPE (PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC |   \
+                               PERF_SAMPLE_PHYS_ADDR | PERF_SAMPLE_WEIGHT | \
+                               PERF_SAMPLE_TRANSACTION)
+
+static u64 pebs_update_adaptive_cfg(struct perf_event *event)
+{
+       struct perf_event_attr *attr = &event->attr;
+       u64 sample_type = attr->sample_type;
+       u64 pebs_data_cfg = 0;
+       bool gprs, tsx_weight;
+
+       if (!(sample_type & ~(PERF_SAMPLE_IP|PERF_SAMPLE_TIME)) &&
+           attr->precise_ip > 1)
+               return pebs_data_cfg;
+
+       if (sample_type & PERF_PEBS_MEMINFO_TYPE)
+               pebs_data_cfg |= PEBS_DATACFG_MEMINFO;
+
+       /*
+        * We need GPRs when:
+        * + the user requested them
+        * + precise_ip < 2 for the non-event IP
+        * + RTM TSX weight needs GPRs for the abort code
+        */
+       gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
+              (attr->sample_regs_intr & PEBS_GP_REGS);
+
+       tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT) &&
+                    ((attr->config & INTEL_ARCH_EVENT_MASK) ==
+                     x86_pmu.rtm_abort_event);
+
+       if (gprs || (attr->precise_ip < 2) || tsx_weight)
+               pebs_data_cfg |= PEBS_DATACFG_GP;
+
+       if ((sample_type & PERF_SAMPLE_REGS_INTR) &&
+           (attr->sample_regs_intr & PEBS_XMM_REGS))
+               pebs_data_cfg |= PEBS_DATACFG_XMMS;
+
+       if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
+               /*
+                * For now, always log all LBRs; this could be
+                * made configurable later.
+                */
+               pebs_data_cfg |= PEBS_DATACFG_LBRS |
+                       ((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT);
+       }
+
+       return pebs_data_cfg;
+}
+
 static void
-pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, struct pmu *pmu)
+pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
+                 struct perf_event *event, bool add)
 {
+       struct pmu *pmu = event->ctx->pmu;
        /*
         * Make sure we get updated with the first PEBS
         * event. It will trigger also during removal, but
@@ -933,6 +1023,29 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, struct pmu *pmu)
                update = true;
        }
 
+       /*
+        * The PEBS record doesn't shrink on pmu::del(). Doing so would require
+        * iterating all remaining PEBS events to reconstruct the config.
+        */
+       if (x86_pmu.intel_cap.pebs_baseline && add) {
+               u64 pebs_data_cfg;
+
+               /* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
+               if (cpuc->n_pebs == 1) {
+                       cpuc->pebs_data_cfg = 0;
+                       cpuc->pebs_record_size = sizeof(struct pebs_basic);
+               }
+
+               pebs_data_cfg = pebs_update_adaptive_cfg(event);
+
+               /* Update pebs_record_size if new event requires more data. */
+               if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
+                       cpuc->pebs_data_cfg |= pebs_data_cfg;
+                       adaptive_pebs_record_size_update();
+                       update = true;
+               }
+       }
+
        if (update)
                pebs_update_threshold(cpuc);
 }
@@ -947,7 +1060,7 @@ void intel_pmu_pebs_add(struct perf_event *event)
        if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
                cpuc->n_large_pebs++;
 
-       pebs_update_state(needed_cb, cpuc, event->ctx->pmu);
+       pebs_update_state(needed_cb, cpuc, event, true);
 }
 
 void intel_pmu_pebs_enable(struct perf_event *event)
@@ -960,11 +1073,19 @@ void intel_pmu_pebs_enable(struct perf_event *event)
 
        cpuc->pebs_enabled |= 1ULL << hwc->idx;
 
-       if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
+       if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5))
                cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
        else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
                cpuc->pebs_enabled |= 1ULL << 63;
 
+       if (x86_pmu.intel_cap.pebs_baseline) {
+               hwc->config |= ICL_EVENTSEL_ADAPTIVE;
+               if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
+                       wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
+                       cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
+               }
+       }
+
        /*
         * Use auto-reload if possible to save an MSR write in the PMI.
         * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD
         * can change the period at any time.
@@ -991,7 +1112,7 @@ void intel_pmu_pebs_del(struct perf_event *event)
        if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
                cpuc->n_large_pebs--;
 
-       pebs_update_state(needed_cb, cpuc, event->ctx->pmu);
+       pebs_update_state(needed_cb, cpuc, event, false);
 }
 
 void intel_pmu_pebs_disable(struct perf_event *event)
@@ -1004,7 +1125,8 @@ void intel_pmu_pebs_disable(struct perf_event *event)
 
        cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
 
-       if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
+       if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) &&
+           (x86_pmu.version < 5))
                cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
        else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
                cpuc->pebs_enabled &= ~(1ULL << 63);
@@ -1125,34 +1247,57 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
        return 0;
 }
 
-static inline u64 intel_hsw_weight(struct pebs_record_skl *pebs)
+static inline u64 intel_get_tsx_weight(u64 tsx_tuning)
 {
-       if (pebs->tsx_tuning) {
-               union hsw_tsx_tuning tsx = { .value = pebs->tsx_tuning };
+       if (tsx_tuning) {
+               union hsw_tsx_tuning tsx = { .value = tsx_tuning };
                return tsx.cycles_last_block;
        }
        return 0;
 }
 
-static inline u64 intel_hsw_transaction(struct pebs_record_skl *pebs)
+static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax)
 {
-       u64 txn = (pebs->tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
+       u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
 
        /* For RTM XABORTs also log the abort code from AX */
-       if ((txn & PERF_TXN_TRANSACTION) && (pebs->ax & 1))
-               txn |= ((pebs->ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
+       if ((txn & PERF_TXN_TRANSACTION) && (ax & 1))
+               txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
        return txn;
 }
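A small stand-alone sketch of the abort-code extraction above. The PERF_TXN_* values are assumptions mirroring the uapi definitions; the AX layout (abort code in bits 31:24, bit 0 flagging an RTM abort) follows the comment in the function:

#include <stdio.h>
#include <stdint.h>

#define PERF_TXN_TRANSACTION    (1ULL << 1)     /* assumed, mirrors uapi */
#define PERF_TXN_ABORT_SHIFT    32

int main(void)
{
        uint64_t ax  = (0x42ULL << 24) | 1;     /* XABORT code 0x42, RTM abort */
        uint64_t txn = PERF_TXN_TRANSACTION;    /* record was in a transaction */

        if ((txn & PERF_TXN_TRANSACTION) && (ax & 1))
                txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;

        printf("abort code = %#llx\n",
               (unsigned long long)((txn >> PERF_TXN_ABORT_SHIFT) & 0xff));
        return 0;
}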
 
-static void setup_pebs_sample_data(struct perf_event *event,
-                                  struct pt_regs *iregs, void *__pebs,
-                                  struct perf_sample_data *data,
-                                  struct pt_regs *regs)
+static inline u64 get_pebs_status(void *n)
 {
+       if (x86_pmu.intel_cap.pebs_format < 4)
+               return ((struct pebs_record_nhm *)n)->status;
+       return ((struct pebs_basic *)n)->applicable_counters;
+}
+
 #define PERF_X86_EVENT_PEBS_HSW_PREC \
                (PERF_X86_EVENT_PEBS_ST_HSW | \
                 PERF_X86_EVENT_PEBS_LD_HSW | \
                 PERF_X86_EVENT_PEBS_NA_HSW)
+
+static u64 get_data_src(struct perf_event *event, u64 aux)
+{
+       u64 val = PERF_MEM_NA;
+       int fl = event->hw.flags;
+       bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
+
+       if (fl & PERF_X86_EVENT_PEBS_LDLAT)
+               val = load_latency_data(aux);
+       else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
+               val = precise_datala_hsw(event, aux);
+       else if (fst)
+               val = precise_store_data(aux);
+       return val;
+}
+
+static void setup_pebs_fixed_sample_data(struct perf_event *event,
+                                  struct pt_regs *iregs, void *__pebs,
+                                  struct perf_sample_data *data,
+                                  struct pt_regs *regs)
+{
        /*
         * We cast to the biggest pebs_record but are careful not to
         * unconditionally access the 'extra' entries.
@@ -1160,17 +1305,13 @@ static void setup_pebs_sample_data(struct perf_event *event,
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct pebs_record_skl *pebs = __pebs;
        u64 sample_type;
-       int fll, fst, dsrc;
-       int fl = event->hw.flags;
+       int fll;
 
        if (pebs == NULL)
                return;
 
        sample_type = event->attr.sample_type;
-       dsrc = sample_type & PERF_SAMPLE_DATA_SRC;
-
-       fll = fl & PERF_X86_EVENT_PEBS_LDLAT;
-       fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
+       fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
 
        perf_sample_data_init(data, 0, event->hw.last_period);
 
@@ -1185,16 +1326,8 @@ static void setup_pebs_sample_data(struct perf_event *event,
        /*
         * data.data_src encodes the data source
         */
-       if (dsrc) {
-               u64 val = PERF_MEM_NA;
-               if (fll)
-                       val = load_latency_data(pebs->dse);
-               else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
-                       val = precise_datala_hsw(event, pebs->dse);
-               else if (fst)
-                       val = precise_store_data(pebs->dse);
-               data->data_src.val = val;
-       }
+       if (sample_type & PERF_SAMPLE_DATA_SRC)
+               data->data_src.val = get_data_src(event, pebs->dse);
 
        /*
         * We must however always use iregs for the unwinder to stay sane; the
@@ -1281,10 +1414,11 @@ static void setup_pebs_sample_data(struct perf_event *event,
        if (x86_pmu.intel_cap.pebs_format >= 2) {
                /* Only set the TSX weight when there is no memory weight. */
                if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
-                       data->weight = intel_hsw_weight(pebs);
+                       data->weight = intel_get_tsx_weight(pebs->tsx_tuning);
 
                if (sample_type & PERF_SAMPLE_TRANSACTION)
-                       data->txn = intel_hsw_transaction(pebs);
+                       data->txn = intel_get_tsx_transaction(pebs->tsx_tuning,
+                                                             pebs->ax);
        }
 
        /*
@@ -1301,6 +1435,140 @@ static void setup_pebs_sample_data(struct perf_event *event,
                data->br_stack = &cpuc->lbr_stack;
 }
 
+static void adaptive_pebs_save_regs(struct pt_regs *regs,
+                                   struct pebs_gprs *gprs)
+{
+       regs->ax = gprs->ax;
+       regs->bx = gprs->bx;
+       regs->cx = gprs->cx;
+       regs->dx = gprs->dx;
+       regs->si = gprs->si;
+       regs->di = gprs->di;
+       regs->bp = gprs->bp;
+       regs->sp = gprs->sp;
+#ifndef CONFIG_X86_32
+       regs->r8 = gprs->r8;
+       regs->r9 = gprs->r9;
+       regs->r10 = gprs->r10;
+       regs->r11 = gprs->r11;
+       regs->r12 = gprs->r12;
+       regs->r13 = gprs->r13;
+       regs->r14 = gprs->r14;
+       regs->r15 = gprs->r15;
+#endif
+}
+
+/*
+ * With adaptive PEBS the layout depends on what fields are configured.
+ */
+
+static void setup_pebs_adaptive_sample_data(struct perf_event *event,
+                                           struct pt_regs *iregs, void *__pebs,
+                                           struct perf_sample_data *data,
+                                           struct pt_regs *regs)
+{
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       struct pebs_basic *basic = __pebs;
+       void *next_record = basic + 1;
+       u64 sample_type;
+       u64 format_size;
+       struct pebs_meminfo *meminfo = NULL;
+       struct pebs_gprs *gprs = NULL;
+       struct x86_perf_regs *perf_regs;
+
+       if (basic == NULL)
+               return;
+
+       perf_regs = container_of(regs, struct x86_perf_regs, regs);
+       perf_regs->xmm_regs = NULL;
+
+       sample_type = event->attr.sample_type;
+       format_size = basic->format_size;
+       perf_sample_data_init(data, 0, event->hw.last_period);
+       data->period = event->hw.last_period;
+
+       if (event->attr.use_clockid == 0)
+               data->time = native_sched_clock_from_tsc(basic->tsc);
+
+       /*
+        * We must however always use iregs for the unwinder to stay sane; the
+        * record BP,SP,IP can point into thin air when the record is from a
+        * previous PMI context or an (I)RET happened between the record and
+        * PMI.
+        */
+       if (sample_type & PERF_SAMPLE_CALLCHAIN)
+               data->callchain = perf_callchain(event, iregs);
+
+       *regs = *iregs;
+       /* The ip in basic is EventingIP */
+       set_linear_ip(regs, basic->ip);
+       regs->flags = PERF_EFLAGS_EXACT;
+
+       /*
+        * The MEMINFO group sits in front of the GP group, but
+        * PERF_SAMPLE_TRANSACTION needs gprs->ax.
+        * Save the pointer here and process it later.
+        */
+       if (format_size & PEBS_DATACFG_MEMINFO) {
+               meminfo = next_record;
+               next_record = meminfo + 1;
+       }
+
+       if (format_size & PEBS_DATACFG_GP) {
+               gprs = next_record;
+               next_record = gprs + 1;
+
+               if (event->attr.precise_ip < 2) {
+                       set_linear_ip(regs, gprs->ip);
+                       regs->flags &= ~PERF_EFLAGS_EXACT;
+               }
+
+               if (sample_type & PERF_SAMPLE_REGS_INTR)
+                       adaptive_pebs_save_regs(regs, gprs);
+       }
+
+       if (format_size & PEBS_DATACFG_MEMINFO) {
+               if (sample_type & PERF_SAMPLE_WEIGHT)
+                       data->weight = meminfo->latency ?:
+                               intel_get_tsx_weight(meminfo->tsx_tuning);
+
+               if (sample_type & PERF_SAMPLE_DATA_SRC)
+                       data->data_src.val = get_data_src(event, meminfo->aux);
+
+               if (sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR))
+                       data->addr = meminfo->address;
+
+               if (sample_type & PERF_SAMPLE_TRANSACTION)
+                       data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning,
+                                                         gprs ? gprs->ax : 0);
+       }
+
+       if (format_size & PEBS_DATACFG_XMMS) {
+               struct pebs_xmm *xmm = next_record;
+
+               next_record = xmm + 1;
+               perf_regs->xmm_regs = xmm->xmm;
+       }
+
+       if (format_size & PEBS_DATACFG_LBRS) {
+               struct pebs_lbr *lbr = next_record;
+               int num_lbr = ((format_size >> PEBS_DATACFG_LBR_SHIFT)
+                                       & 0xff) + 1;
+               next_record = next_record + num_lbr*sizeof(struct pebs_lbr_entry);
+
+               if (has_branch_stack(event)) {
+                       intel_pmu_store_pebs_lbrs(lbr);
+                       data->br_stack = &cpuc->lbr_stack;
+               }
+       }
+
+       WARN_ONCE(next_record != __pebs + (format_size >> 48),
+                       "PEBS record size %llu, expected %llu, config %llx\n",
+                       format_size >> 48,
+                       (u64)(next_record - __pebs),
+                       basic->format_size);
+}
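The adaptive record is parsed as a cursor walk over optional groups, and the record's total size travels in the top 16 bits of format_size. A compact stand-alone sketch of that walk, using the same assumed group layouts as the earlier sketch (repeated so this compiles on its own):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define PEBS_DATACFG_GP (1ULL << 1)             /* assumed bit position */

struct pebs_basic { uint64_t format_size, ip, applicable_counters, tsc; };
struct pebs_gprs  { uint64_t flags, ip, ax, cx, dx, bx, sp, bp, si, di,
                             r8, r9, r10, r11, r12, r13, r14, r15; };

int main(void)
{
        unsigned char buf[sizeof(struct pebs_basic) + sizeof(struct pebs_gprs)];
        struct pebs_basic *basic = (struct pebs_basic *)buf;
        void *next_record = basic + 1;

        memset(buf, 0, sizeof(buf));
        /* low bits name the groups present, top 16 bits the total size */
        basic->format_size = PEBS_DATACFG_GP | ((uint64_t)sizeof(buf) << 48);

        if (basic->format_size & PEBS_DATACFG_GP)
                next_record = (struct pebs_gprs *)next_record + 1;

        /* the same consistency check the kernel's WARN_ONCE performs */
        assert((unsigned char *)next_record == buf + (basic->format_size >> 48));
        return 0;
}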
+
 static inline void *
 get_next_pebs_record_by_bit(void *base, void *top, int bit)
 {
@@ -1318,19 +1586,19 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit)
        if (base == NULL)
                return NULL;
 
-       for (at = base; at < top; at += x86_pmu.pebs_record_size) {
-               struct pebs_record_nhm *p = at;
+       for (at = base; at < top; at += cpuc->pebs_record_size) {
+               unsigned long status = get_pebs_status(at);
 
-               if (test_bit(bit, (unsigned long *)&p->status)) {
+               if (test_bit(bit, (unsigned long *)&status)) {
                        /* PEBS v3 has accurate status bits */
                        if (x86_pmu.intel_cap.pebs_format >= 3)
                                return at;
 
-                       if (p->status == (1 << bit))
+                       if (status == (1 << bit))
                                return at;
 
                        /* clear non-PEBS bit and re-check */
-                       pebs_status = p->status & cpuc->pebs_enabled;
+                       pebs_status = status & cpuc->pebs_enabled;
                        pebs_status &= PEBS_COUNTER_MASK;
                        if (pebs_status == (1 << bit))
                                return at;
@@ -1410,11 +1678,18 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
 static void __intel_pmu_pebs_event(struct perf_event *event,
                                   struct pt_regs *iregs,
                                   void *base, void *top,
-                                  int bit, int count)
+                                  int bit, int count,
+                                  void (*setup_sample)(struct perf_event *,
+                                               struct pt_regs *,
+                                               void *,
+                                               struct perf_sample_data *,
+                                               struct pt_regs *))
 {
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        struct perf_sample_data data;
-       struct pt_regs regs;
+       struct x86_perf_regs perf_regs;
+       struct pt_regs *regs = &perf_regs.regs;
        void *at = get_next_pebs_record_by_bit(base, top, bit);
 
        if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
@@ -1429,20 +1704,20 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
                return;
 
        while (count > 1) {
-               setup_pebs_sample_data(event, iregs, at, &data, &regs);
-               perf_event_output(event, &data, &regs);
-               at += x86_pmu.pebs_record_size;
+               setup_sample(event, iregs, at, &data, regs);
+               perf_event_output(event, &data, regs);
+               at += cpuc->pebs_record_size;
                at = get_next_pebs_record_by_bit(at, top, bit);
                count--;
        }
 
-       setup_pebs_sample_data(event, iregs, at, &data, &regs);
+       setup_sample(event, iregs, at, &data, regs);
 
        /*
         * All but the last records are processed.
         * The last one is left to be able to call the overflow handler.
         */
-       if (perf_event_overflow(event, &data, &regs)) {
+       if (perf_event_overflow(event, &data, regs)) {
                x86_pmu_stop(event, 0);
                return;
        }
@@ -1483,7 +1758,27 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
                return;
        }
 
-       __intel_pmu_pebs_event(event, iregs, at, top, 0, n);
+       __intel_pmu_pebs_event(event, iregs, at, top, 0, n,
+                              setup_pebs_fixed_sample_data);
+}
+
+static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
+{
+       struct perf_event *event;
+       int bit;
+
+       /*
+        * The drain_pebs() could be called twice in a short period
+        * for an auto-reload event in pmu::read(), with no overflow
+        * having happened in between. intel_pmu_save_and_restart_reload()
+        * must still be called to update event->count for this case.
+        */
+       for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) {
+               event = cpuc->events[bit];
+               if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
+                       intel_pmu_save_and_restart_reload(event, 0);
+       }
 }
 
 static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
@@ -1513,19 +1808,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
        }
 
        if (unlikely(base >= top)) {
-               /*
-                * The drain_pebs() could be called twice in a short period
-                * for auto-reload event in pmu::read(). There are no
-                * overflows have happened in between.
-                * It needs to call intel_pmu_save_and_restart_reload() to
-                * update the event->count for this case.
-                */
-               for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled,
-                                size) {
-                       event = cpuc->events[bit];
-                       if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
-                               intel_pmu_save_and_restart_reload(event, 0);
-               }
+               intel_pmu_pebs_event_update_no_drain(cpuc, size);
                return;
        }
 
@@ -1538,8 +1821,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 
                /* PEBS v3 has more accurate status bits */
                if (x86_pmu.intel_cap.pebs_format >= 3) {
-                       for_each_set_bit(bit, (unsigned long *)&pebs_status,
-                                        size)
+                       for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
                                counts[bit]++;
 
                        continue;
@@ -1578,8 +1860,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
                 * If collision happened, the record will be dropped.
                 */
                if (p->status != (1ULL << bit)) {
-                       for_each_set_bit(i, (unsigned long *)&pebs_status,
-                                        x86_pmu.max_pebs_events)
+                       for_each_set_bit(i, (unsigned long *)&pebs_status, size)
                                error[i]++;
                        continue;
                }
@@ -1587,7 +1868,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
                counts[bit]++;
        }
 
-       for (bit = 0; bit < size; bit++) {
+       for_each_set_bit(bit, (unsigned long *)&mask, size) {
                if ((counts[bit] == 0) && (error[bit] == 0))
                        continue;
 
@@ -1608,11 +1889,66 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 
                if (counts[bit]) {
                        __intel_pmu_pebs_event(event, iregs, base,
-                                              top, bit, counts[bit]);
+                                              top, bit, counts[bit],
+                                              setup_pebs_fixed_sample_data);
                }
        }
 }
 
+static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs)
+{
+       short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       struct debug_store *ds = cpuc->ds;
+       struct perf_event *event;
+       void *base, *at, *top;
+       int bit, size;
+       u64 mask;
+
+       if (!x86_pmu.pebs_active)
+               return;
+
+       base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base;
+       top = (struct pebs_basic *)(unsigned long)ds->pebs_index;
+
+       ds->pebs_index = ds->pebs_buffer_base;
+
+       mask = ((1ULL << x86_pmu.max_pebs_events) - 1) |
+              (((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
+       size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
+
+       if (unlikely(base >= top)) {
+               intel_pmu_pebs_event_update_no_drain(cpuc, size);
+               return;
+       }
+
+       for (at = base; at < top; at += cpuc->pebs_record_size) {
+               u64 pebs_status;
+
+               pebs_status = get_pebs_status(at) & cpuc->pebs_enabled;
+               pebs_status &= mask;
+
+               for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
+                       counts[bit]++;
+       }
+
+       for_each_set_bit(bit, (unsigned long *)&mask, size) {
+               if (counts[bit] == 0)
+                       continue;
+
+               event = cpuc->events[bit];
+               if (WARN_ON_ONCE(!event))
+                       continue;
+
+               if (WARN_ON_ONCE(!event->attr.precise_ip))
+                       continue;
+
+               __intel_pmu_pebs_event(event, iregs, base,
+                                      top, bit, counts[bit],
+                                      setup_pebs_adaptive_sample_data);
+       }
+}
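The drain loop above makes two passes: it first tallies, per counter, how many records name that counter in applicable_counters, then emits the samples counter by counter. A user-space sketch of the tallying pass with invented status words, where __builtin_ctzll stands in for for_each_set_bit:

#include <stdio.h>
#include <stdint.h>

#define MAX_COUNTERS 48         /* hypothetical GP + fixed counter space */

int main(void)
{
        uint64_t status[3] = { 0x1, 0x5, 0x4 }; /* three fake records */
        uint64_t enabled   = 0x5;               /* counters 0 and 2 enabled */
        short counts[MAX_COUNTERS] = { 0 };
        int i;

        for (i = 0; i < 3; i++) {
                uint64_t s = status[i] & enabled;

                while (s) {                     /* open-coded for_each_set_bit */
                        int bit = __builtin_ctzll(s);

                        counts[bit]++;
                        s &= s - 1;             /* clear lowest set bit */
                }
        }

        for (i = 0; i < MAX_COUNTERS; i++)
                if (counts[i])
                        printf("counter %d: %d records\n", i, counts[i]);
        return 0;
}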
+
 /*
  * BTS, PEBS probe and setup
  */
@@ -1628,12 +1964,18 @@ void __init intel_ds_init(void)
        x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
        x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
        x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
-       if (x86_pmu.version <= 4)
+       if (x86_pmu.version <= 4) {
                x86_pmu.pebs_no_isolation = 1;
+               x86_pmu.pebs_no_xmm_regs = 1;
+       }
        if (x86_pmu.pebs) {
                char pebs_type = x86_pmu.intel_cap.pebs_trap ?  '+' : '-';
+               char *pebs_qual = "";
                int format = x86_pmu.intel_cap.pebs_format;
 
+               if (format < 4)
+                       x86_pmu.intel_cap.pebs_baseline = 0;
+
                switch (format) {
                case 0:
                        pr_cont("PEBS fmt0%c, ", pebs_type);
@@ -1669,6 +2011,29 @@ void __init intel_ds_init(void)
                        x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
                        break;
 
+               case 4:
+                       x86_pmu.drain_pebs = intel_pmu_drain_pebs_icl;
+                       x86_pmu.pebs_record_size = sizeof(struct pebs_basic);
+                       if (x86_pmu.intel_cap.pebs_baseline) {
+                               x86_pmu.large_pebs_flags |=
+                                       PERF_SAMPLE_BRANCH_STACK |
+                                       PERF_SAMPLE_TIME;
+                               x86_pmu.flags |= PMU_FL_PEBS_ALL;
+                               pebs_qual = "-baseline";
+                       } else {
+                               /* Only basic record supported */
+                               x86_pmu.pebs_no_xmm_regs = 1;
+                               x86_pmu.large_pebs_flags &=
+                                       ~(PERF_SAMPLE_ADDR |
+                                         PERF_SAMPLE_TIME |
+                                         PERF_SAMPLE_DATA_SRC |
+                                         PERF_SAMPLE_TRANSACTION |
+                                         PERF_SAMPLE_REGS_USER |
+                                         PERF_SAMPLE_REGS_INTR);
+                       }
+                       pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual);
+                       break;
+
                default:
                        pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
                        x86_pmu.pebs = 0;
index 580c1b91c454024cf6062b8c1013ac1f8a1d5e5a..6f814a27416b4268b94f0d83a69b022548c226e3 100644 (file)
@@ -488,6 +488,8 @@ void intel_pmu_lbr_add(struct perf_event *event)
         * be 'new'. Conversely, a new event can get installed through the
         * context switch path for the first time.
         */
+       if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
+               cpuc->lbr_pebs_users++;
        perf_sched_cb_inc(event->ctx->pmu);
        if (!cpuc->lbr_users++ && !event->total_time_running)
                intel_pmu_lbr_reset();
@@ -507,8 +509,11 @@ void intel_pmu_lbr_del(struct perf_event *event)
                task_ctx->lbr_callstack_users--;
        }
 
+       if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
+               cpuc->lbr_pebs_users--;
        cpuc->lbr_users--;
        WARN_ON_ONCE(cpuc->lbr_users < 0);
+       WARN_ON_ONCE(cpuc->lbr_pebs_users < 0);
        perf_sched_cb_dec(event->ctx->pmu);
 }
 
@@ -658,7 +663,13 @@ void intel_pmu_lbr_read(void)
 {
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
-       if (!cpuc->lbr_users)
+       /*
+        * Don't read when all LBR users are using adaptive PEBS.
+        *
+        * This could be smarter and actually check the event,
+        * but this simple approach seems to work for now.
+        */
+       if (!cpuc->lbr_users || cpuc->lbr_users == cpuc->lbr_pebs_users)
                return;
 
        if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
@@ -1080,6 +1091,28 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
        }
 }
 
+void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr)
+{
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       int i;
+
+       cpuc->lbr_stack.nr = x86_pmu.lbr_nr;
+       for (i = 0; i < x86_pmu.lbr_nr; i++) {
+               u64 info = lbr->lbr[i].info;
+               struct perf_branch_entry *e = &cpuc->lbr_entries[i];
+
+               e->from         = lbr->lbr[i].from;
+               e->to           = lbr->lbr[i].to;
+               e->mispred      = !!(info & LBR_INFO_MISPRED);
+               e->predicted    = !(info & LBR_INFO_MISPRED);
+               e->in_tx        = !!(info & LBR_INFO_IN_TX);
+               e->abort        = !!(info & LBR_INFO_ABORT);
+               e->cycles       = info & LBR_INFO_CYCLES;
+               e->reserved     = 0;
+       }
+       intel_pmu_lbr_filter(cpuc);
+}
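A stand-alone sketch of the info-word decode above. The LBR_INFO_* masks are assumptions mirroring the kernel's MSR_LBR_INFO_x layout: misprediction, transaction, and abort flags in the top bits, a 16-bit cycle count at the bottom:

#include <stdio.h>
#include <stdint.h>

#define LBR_INFO_MISPRED        (1ULL << 63)    /* assumed bit layout */
#define LBR_INFO_IN_TX          (1ULL << 62)
#define LBR_INFO_ABORT          (1ULL << 61)
#define LBR_INFO_CYCLES         0xffffULL

int main(void)
{
        uint64_t info = LBR_INFO_MISPRED | 120; /* mispredicted, 120 cycles */

        printf("mispred=%d predicted=%d in_tx=%d abort=%d cycles=%llu\n",
               !!(info & LBR_INFO_MISPRED), !(info & LBR_INFO_MISPRED),
               !!(info & LBR_INFO_IN_TX), !!(info & LBR_INFO_ABORT),
               (unsigned long long)(info & LBR_INFO_CYCLES));
        return 0;
}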
+
 /*
  * Map interface branch filters onto LBR filters
  */
index fb3a2f13fc709256e81719a229d5d3fdcde6e430..339d7628080cf2d83bcff2f17db305f0e29ffec7 100644 (file)
@@ -1525,8 +1525,7 @@ static __init int pt_init(void)
        }
 
        if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
-               pt_pmu.pmu.capabilities =
-                       PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF;
+               pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG;
 
        pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
        pt_pmu.pmu.attr_groups           = pt_attr_groups;
index 94dc564146ca89190cc203d2b78e6651b4742f19..37ebf6fc5415b4f89e881318e27924216cf2e0d1 100644 (file)
@@ -775,6 +775,8 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, hsw_rapl_init),
 
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, hsw_rapl_init),
+
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE,  skl_rapl_init),
        {},
 };
 
index 9fe64c01a2e5a9572386352669e0c03e833b1a61..fc40a1473058e94f793b211dfa14ebf74a05ce47 100644 (file)
@@ -1367,6 +1367,11 @@ static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
        .pci_init = skx_uncore_pci_init,
 };
 
+static const struct intel_uncore_init_fun icl_uncore_init __initconst = {
+       .cpu_init = icl_uncore_cpu_init,
+       .pci_init = skl_uncore_pci_init,
+};
+
 static const struct x86_cpu_id intel_uncore_match[] __initconst = {
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP,     nhm_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM,        nhm_uncore_init),
@@ -1393,6 +1398,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,      skx_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, icl_uncore_init),
        {},
 };
 
index 853a49a8ccf6748024e7da090c8e01c0c8edaeb1..79eb2e21e4f043cc6a18a43b1fd998dcd750b594 100644 (file)
@@ -512,6 +512,7 @@ int skl_uncore_pci_init(void);
 void snb_uncore_cpu_init(void);
 void nhm_uncore_cpu_init(void);
 void skl_uncore_cpu_init(void);
+void icl_uncore_cpu_init(void);
 int snb_pci2phy_map_init(int devid);
 
 /* uncore_snbep.c */
index 13493f43b24739928c006fb0dc1fe600f21ac9a9..f8431819b3e122b279c5e87af82f29214885f59e 100644 (file)
@@ -34,6 +34,8 @@
 #define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC       0x3e33
 #define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC       0x3eca
 #define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC       0x3e32
+#define PCI_DEVICE_ID_INTEL_ICL_U_IMC          0x8a02
+#define PCI_DEVICE_ID_INTEL_ICL_U2_IMC         0x8a12
 
 /* SNB event control */
 #define SNB_UNC_CTL_EV_SEL_MASK                        0x000000ff
 #define SKL_UNC_PERF_GLOBAL_CTL                        0xe01
 #define SKL_UNC_GLOBAL_CTL_CORE_ALL            ((1 << 5) - 1)
 
+/* ICL Cbo register */
+#define ICL_UNC_CBO_CONFIG                     0x396
+#define ICL_UNC_NUM_CBO_MASK                   0xf
+#define ICL_UNC_CBO_0_PER_CTR0                 0x702
+#define ICL_UNC_CBO_MSR_OFFSET                 0x8
+
 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
@@ -280,6 +288,70 @@ void skl_uncore_cpu_init(void)
        snb_uncore_arb.ops = &skl_uncore_msr_ops;
 }
 
+static struct intel_uncore_type icl_uncore_cbox = {
+       .name           = "cbox",
+       .num_counters   = 4,
+       .perf_ctr_bits  = 44,
+       .perf_ctr       = ICL_UNC_CBO_0_PER_CTR0,
+       .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
+       .event_mask     = SNB_UNC_RAW_EVENT_MASK,
+       .msr_offset     = ICL_UNC_CBO_MSR_OFFSET,
+       .ops            = &skl_uncore_msr_ops,
+       .format_group   = &snb_uncore_format_group,
+};
+
+static struct uncore_event_desc icl_uncore_events[] = {
+       INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
+       { /* end: all zeroes */ },
+};
+
+static struct attribute *icl_uncore_clock_formats_attr[] = {
+       &format_attr_event.attr,
+       NULL,
+};
+
+static struct attribute_group icl_uncore_clock_format_group = {
+       .name = "format",
+       .attrs = icl_uncore_clock_formats_attr,
+};
+
+static struct intel_uncore_type icl_uncore_clockbox = {
+       .name           = "clock",
+       .num_counters   = 1,
+       .num_boxes      = 1,
+       .fixed_ctr_bits = 48,
+       .fixed_ctr      = SNB_UNC_FIXED_CTR,
+       .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
+       .single_fixed   = 1,
+       .event_mask     = SNB_UNC_CTL_EV_SEL_MASK,
+       .format_group   = &icl_uncore_clock_format_group,
+       .ops            = &skl_uncore_msr_ops,
+       .event_descs    = icl_uncore_events,
+};
+
+static struct intel_uncore_type *icl_msr_uncores[] = {
+       &icl_uncore_cbox,
+       &snb_uncore_arb,
+       &icl_uncore_clockbox,
+       NULL,
+};
+
+static int icl_get_cbox_num(void)
+{
+       u64 num_boxes;
+
+       rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes);
+
+       return num_boxes & ICL_UNC_NUM_CBO_MASK;
+}
+
+void icl_uncore_cpu_init(void)
+{
+       uncore_msr_uncores = icl_msr_uncores;
+       icl_uncore_cbox.num_boxes = icl_get_cbox_num();
+       snb_uncore_arb.ops = &skl_uncore_msr_ops;
+}
+
 enum {
        SNB_PCI_UNCORE_IMC,
 };
@@ -668,6 +740,18 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
        { /* end: all zeroes */ },
 };
 
+static const struct pci_device_id icl_uncore_pci_ids[] = {
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U2_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* end: all zeroes */ },
+};
+
 static struct pci_driver snb_uncore_pci_driver = {
        .name           = "snb_uncore",
        .id_table       = snb_uncore_pci_ids,
@@ -693,6 +777,11 @@ static struct pci_driver skl_uncore_pci_driver = {
        .id_table       = skl_uncore_pci_ids,
 };
 
+static struct pci_driver icl_uncore_pci_driver = {
+       .name           = "icl_uncore",
+       .id_table       = icl_uncore_pci_ids,
+};
+
 struct imc_uncore_pci_dev {
        __u32 pci_id;
        struct pci_driver *driver;
@@ -732,6 +821,8 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
        IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Server */
        IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Server */
        IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Server */
+       IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver),     /* 10th Gen Core Mobile */
+       IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver),    /* 10th Gen Core Mobile */
        {  /* end marker */ }
 };
 
index a878e6286e4afa0a6840d90d84f1386ee4934605..f3f4c2263501d1e6dc2390d0b4f404c487e33691 100644 (file)
@@ -89,6 +89,7 @@ static bool test_intel(int idx)
        case INTEL_FAM6_SKYLAKE_X:
        case INTEL_FAM6_KABYLAKE_MOBILE:
        case INTEL_FAM6_KABYLAKE_DESKTOP:
+       case INTEL_FAM6_ICELAKE_MOBILE:
                if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
                        return true;
                break;
index a75955741c50422b9894d454c1a33ec7c9790a77..07fc84bb85c1e9e85138cd9205045215e1d0d528 100644 (file)
@@ -49,28 +49,33 @@ struct event_constraint {
                unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
                u64             idxmsk64;
        };
-       u64     code;
-       u64     cmask;
-       int     weight;
-       int     overlap;
-       int     flags;
+       u64             code;
+       u64             cmask;
+       int             weight;
+       int             overlap;
+       int             flags;
+       unsigned int    size;
 };
+
+static inline bool constraint_match(struct event_constraint *c, u64 ecode)
+{
+       return ((ecode & c->cmask) - c->code) <= (u64)c->size;
+}
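The new size field turns an exact-match constraint into a range, and constraint_match() covers both out-of-range directions with a single unsigned compare: subtracting c->code from a smaller masked code wraps around to a huge value. A minimal user-space sketch with a hypothetical 0xd1..0xd4 range (matching the MEM_LOAD_*_RETIRED.* constraint added above):

#include <assert.h>
#include <stdint.h>

struct event_constraint { uint64_t code, cmask; unsigned int size; };

static int constraint_match(const struct event_constraint *c, uint64_t ecode)
{
        return ((ecode & c->cmask) - c->code) <= (uint64_t)c->size;
}

int main(void)
{
        struct event_constraint c = {
                .code = 0xd1, .cmask = 0xff, .size = 0xd4 - 0xd1,
        };

        assert(constraint_match(&c, 0xd1));     /* start of the range */
        assert(constraint_match(&c, 0x01d3));   /* umask bits masked off */
        assert(!constraint_match(&c, 0xd0));    /* below: subtraction wraps */
        assert(!constraint_match(&c, 0xd5));    /* above the range */
        return 0;
}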
+
 /*
  * struct hw_perf_event.flags flags
  */
 #define PERF_X86_EVENT_PEBS_LDLAT      0x0001 /* ld+ldlat data address sampling */
 #define PERF_X86_EVENT_PEBS_ST         0x0002 /* st data address sampling */
 #define PERF_X86_EVENT_PEBS_ST_HSW     0x0004 /* haswell style datala, store */
-#define PERF_X86_EVENT_COMMITTED       0x0008 /* event passed commit_txn */
-#define PERF_X86_EVENT_PEBS_LD_HSW     0x0010 /* haswell style datala, load */
-#define PERF_X86_EVENT_PEBS_NA_HSW     0x0020 /* haswell style datala, unknown */
-#define PERF_X86_EVENT_EXCL            0x0040 /* HT exclusivity on counter */
-#define PERF_X86_EVENT_DYNAMIC         0x0080 /* dynamic alloc'd constraint */
-#define PERF_X86_EVENT_RDPMC_ALLOWED   0x0100 /* grant rdpmc permission */
-#define PERF_X86_EVENT_EXCL_ACCT       0x0200 /* accounted EXCL event */
-#define PERF_X86_EVENT_AUTO_RELOAD     0x0400 /* use PEBS auto-reload */
-#define PERF_X86_EVENT_LARGE_PEBS      0x0800 /* use large PEBS */
-
+#define PERF_X86_EVENT_PEBS_LD_HSW     0x0008 /* haswell style datala, load */
+#define PERF_X86_EVENT_PEBS_NA_HSW     0x0010 /* haswell style datala, unknown */
+#define PERF_X86_EVENT_EXCL            0x0020 /* HT exclusivity on counter */
+#define PERF_X86_EVENT_DYNAMIC         0x0040 /* dynamic alloc'd constraint */
+#define PERF_X86_EVENT_RDPMC_ALLOWED   0x0080 /* grant rdpmc permission */
+#define PERF_X86_EVENT_EXCL_ACCT       0x0100 /* accounted EXCL event */
+#define PERF_X86_EVENT_AUTO_RELOAD     0x0200 /* use PEBS auto-reload */
+#define PERF_X86_EVENT_LARGE_PEBS      0x0400 /* use large PEBS */
 
 struct amd_nb {
        int nb_id;  /* NorthBridge id */
@@ -96,25 +101,43 @@ struct amd_nb {
        PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
        PERF_SAMPLE_PERIOD)
 
-#define PEBS_REGS \
-       (PERF_REG_X86_AX | \
-        PERF_REG_X86_BX | \
-        PERF_REG_X86_CX | \
-        PERF_REG_X86_DX | \
-        PERF_REG_X86_DI | \
-        PERF_REG_X86_SI | \
-        PERF_REG_X86_SP | \
-        PERF_REG_X86_BP | \
-        PERF_REG_X86_IP | \
-        PERF_REG_X86_FLAGS | \
-        PERF_REG_X86_R8 | \
-        PERF_REG_X86_R9 | \
-        PERF_REG_X86_R10 | \
-        PERF_REG_X86_R11 | \
-        PERF_REG_X86_R12 | \
-        PERF_REG_X86_R13 | \
-        PERF_REG_X86_R14 | \
-        PERF_REG_X86_R15)
+#define PEBS_GP_REGS                   \
+       ((1ULL << PERF_REG_X86_AX)    | \
+        (1ULL << PERF_REG_X86_BX)    | \
+        (1ULL << PERF_REG_X86_CX)    | \
+        (1ULL << PERF_REG_X86_DX)    | \
+        (1ULL << PERF_REG_X86_DI)    | \
+        (1ULL << PERF_REG_X86_SI)    | \
+        (1ULL << PERF_REG_X86_SP)    | \
+        (1ULL << PERF_REG_X86_BP)    | \
+        (1ULL << PERF_REG_X86_IP)    | \
+        (1ULL << PERF_REG_X86_FLAGS) | \
+        (1ULL << PERF_REG_X86_R8)    | \
+        (1ULL << PERF_REG_X86_R9)    | \
+        (1ULL << PERF_REG_X86_R10)   | \
+        (1ULL << PERF_REG_X86_R11)   | \
+        (1ULL << PERF_REG_X86_R12)   | \
+        (1ULL << PERF_REG_X86_R13)   | \
+        (1ULL << PERF_REG_X86_R14)   | \
+        (1ULL << PERF_REG_X86_R15))
+
+#define PEBS_XMM_REGS                   \
+       ((1ULL << PERF_REG_X86_XMM0)  | \
+        (1ULL << PERF_REG_X86_XMM1)  | \
+        (1ULL << PERF_REG_X86_XMM2)  | \
+        (1ULL << PERF_REG_X86_XMM3)  | \
+        (1ULL << PERF_REG_X86_XMM4)  | \
+        (1ULL << PERF_REG_X86_XMM5)  | \
+        (1ULL << PERF_REG_X86_XMM6)  | \
+        (1ULL << PERF_REG_X86_XMM7)  | \
+        (1ULL << PERF_REG_X86_XMM8)  | \
+        (1ULL << PERF_REG_X86_XMM9)  | \
+        (1ULL << PERF_REG_X86_XMM10) | \
+        (1ULL << PERF_REG_X86_XMM11) | \
+        (1ULL << PERF_REG_X86_XMM12) | \
+        (1ULL << PERF_REG_X86_XMM13) | \
+        (1ULL << PERF_REG_X86_XMM14) | \
+        (1ULL << PERF_REG_X86_XMM15))
 
 /*
  * Per register state.
@@ -207,10 +230,16 @@ struct cpu_hw_events {
        int                     n_pebs;
        int                     n_large_pebs;
 
+       /* Current super set of events hardware configuration */
+       u64                     pebs_data_cfg;
+       u64                     active_pebs_data_cfg;
+       int                     pebs_record_size;
+
        /*
         * Intel LBR bits
         */
        int                             lbr_users;
+       int                             lbr_pebs_users;
        struct perf_branch_stack        lbr_stack;
        struct perf_branch_entry        lbr_entries[MAX_LBR_ENTRIES];
        struct er_account               *lbr_sel;
@@ -257,18 +286,29 @@ struct cpu_hw_events {
        void                            *kfree_on_online[X86_PERF_KFREE_MAX];
 };
 
-#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
+#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {        \
        { .idxmsk64 = (n) },            \
        .code = (c),                    \
+       .size = (e) - (c),              \
        .cmask = (m),                   \
        .weight = (w),                  \
        .overlap = (o),                 \
        .flags = f,                     \
 }
 
+#define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
+       __EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)
+
 #define EVENT_CONSTRAINT(c, n, m)      \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
 
+/*
+ * The constraint_match() function only works for 'simple' event codes
+ * and not for extended (AMD64_EVENTSEL_EVENT) events codes.
+ */
+#define EVENT_CONSTRAINT_RANGE(c, e, n, m) \
+       __EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)
+
 #define INTEL_EXCLEVT_CONSTRAINT(c, n) \
        __EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
                           0, PERF_X86_EVENT_EXCL)
@@ -303,6 +343,12 @@ struct cpu_hw_events {
 #define INTEL_EVENT_CONSTRAINT(c, n)   \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
 
+/*
+ * Constraint on a range of Event codes
+ */
+#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n)                  \
+       EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)
+
 /*
  * Constraint on the Event code + UMask + fixed-mask
  *
@@ -350,6 +396,9 @@ struct cpu_hw_events {
 #define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
 
+#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n)                    \
+       EVENT_CONSTRAINT_RANGE(c, e, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
+
 /* Check only flags, but allow all event/umask */
 #define INTEL_ALL_EVENT_CONSTRAINT(code, n)    \
        EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)
@@ -366,6 +415,11 @@ struct cpu_hw_events {
                          ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
 
+#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
+       __EVENT_CONSTRAINT_RANGE(code, end, n,                          \
+                         ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
+                         HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
+
 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
@@ -473,6 +527,7 @@ union perf_capabilities {
                 * values > 32bit.
                 */
                u64     full_width_write:1;
+               u64     pebs_baseline:1;
        };
        u64     capabilities;
 };
@@ -613,14 +668,16 @@ struct x86_pmu {
                        pebs_broken             :1,
                        pebs_prec_dist          :1,
                        pebs_no_tlb             :1,
-                       pebs_no_isolation       :1;
+                       pebs_no_isolation       :1,
+                       pebs_no_xmm_regs        :1;
        int             pebs_record_size;
        int             pebs_buffer_size;
+       int             max_pebs_events;
        void            (*drain_pebs)(struct pt_regs *regs);
        struct event_constraint *pebs_constraints;
        void            (*pebs_aliases)(struct perf_event *event);
-       int             max_pebs_events;
        unsigned long   large_pebs_flags;
+       u64             rtm_abort_event;
 
        /*
         * Intel LBR
@@ -714,6 +771,7 @@ static struct perf_pmu_events_ht_attr event_attr_##v = {            \
        .event_str_ht   = ht,                                           \
 }
 
+struct pmu *x86_get_pmu(void);
 extern struct x86_pmu x86_pmu __read_mostly;
 
 static inline bool x86_pmu_has_lbr_callstack(void)
@@ -941,6 +999,8 @@ extern struct event_constraint intel_bdw_pebs_event_constraints[];
 
 extern struct event_constraint intel_skl_pebs_event_constraints[];
 
+extern struct event_constraint intel_icl_pebs_event_constraints[];
+
 struct event_constraint *intel_pebs_constraints(struct perf_event *event);
 
 void intel_pmu_pebs_add(struct perf_event *event);
@@ -959,6 +1019,8 @@ void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);
 
 void intel_pmu_auto_reload_read(struct perf_event *event);
 
+void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr);
+
 void intel_ds_init(void);
 
 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
index 6461a16b45594b144f8fe2390fe42973e4fc8725..e4ba467a9fc65b0ec21d7c9f868c9d013a8fddd9 100644 (file)
@@ -103,9 +103,13 @@ static int hv_cpu_init(unsigned int cpu)
        u64 msr_vp_index;
        struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()];
        void **input_arg;
+       struct page *pg;
 
        input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
-       *input_arg = page_address(alloc_page(GFP_KERNEL));
+       pg = alloc_page(GFP_KERNEL);
+       if (unlikely(!pg))
+               return -ENOMEM;
+       *input_arg = page_address(pg);
 
        hv_get_vp_index(msr_vp_index);
 
index 321fe5f5d0e96f8ed3f4962dbf982bc60551cf0e..4d5fcd47ab75a4e2815f2ed381b9356b3c18e7d1 100644 (file)
@@ -61,9 +61,8 @@
 } while (0)
 
 #define RELOAD_SEG(seg)                {               \
-       unsigned int pre = GET_SEG(seg);        \
+       unsigned int pre = (seg) | 3;           \
        unsigned int cur = get_user_seg(seg);   \
-       pre |= 3;                               \
        if (pre != cur)                         \
                set_user_seg(seg, pre);         \
 }
@@ -72,6 +71,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
                                   struct sigcontext_32 __user *sc)
 {
        unsigned int tmpflags, err = 0;
+       u16 gs, fs, es, ds;
        void __user *buf;
        u32 tmp;
 
@@ -79,16 +79,10 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
        current->restart_block.fn = do_no_restart_syscall;
 
        get_user_try {
-               /*
-                * Reload fs and gs if they have changed in the signal
-                * handler.  This does not handle long fs/gs base changes in
-                * the handler, but does not clobber them at least in the
-                * normal case.
-                */
-               RELOAD_SEG(gs);
-               RELOAD_SEG(fs);
-               RELOAD_SEG(ds);
-               RELOAD_SEG(es);
+               gs = GET_SEG(gs);
+               fs = GET_SEG(fs);
+               ds = GET_SEG(ds);
+               es = GET_SEG(es);
 
                COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
                COPY(dx); COPY(cx); COPY(ip); COPY(ax);
@@ -106,6 +100,17 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
                buf = compat_ptr(tmp);
        } get_user_catch(err);
 
+       /*
+        * Reload fs and gs if they have changed in the signal
+        * handler.  This does not handle long fs/gs base changes in
+        * the handler, but does not clobber them at least in the
+        * normal case.
+        */
+       RELOAD_SEG(gs);
+       RELOAD_SEG(fs);
+       RELOAD_SEG(ds);
+       RELOAD_SEG(es);
+
        err |= fpu__restore_sig(buf, 1);
 
        force_iret();
index 31b627b43a8e01933d6209e746f4c08912d0cdef..464034db299f781104da5f05a6a3604320f8d4d7 100644 (file)
        .endm
 #endif
 
+/*
+ * objtool annotation to ignore the alternatives and only consider the original
+ * instruction(s).
+ */
+.macro ANNOTATE_IGNORE_ALTERNATIVE
+       .Lannotate_\@:
+       .pushsection .discard.ignore_alts
+       .long .Lannotate_\@ - .
+       .popsection
+.endm
+
 /*
  * Issue one struct alt_instr descriptor entry (need to put it into
  * the section .altinstructions, see below). This entry contains
index 4c74073a19ccd4b2aa93078c734729e54e65bdd7..094fbc9c0b1c0332a267fb501fb037d3064a85ed 100644 (file)
 #define LOCK_PREFIX ""
 #endif
 
+/*
+ * objtool annotation to ignore the alternatives and only consider the original
+ * instruction(s).
+ */
+#define ANNOTATE_IGNORE_ALTERNATIVE                            \
+       "999:\n\t"                                              \
+       ".pushsection .discard.ignore_alts\n\t"                 \
+       ".long 999b - .\n\t"                                    \
+       ".popsection\n\t"
+
 struct alt_instr {
        s32 instr_offset;       /* original instruction */
        s32 repl_offset;        /* offset to replacement instruction */
index 6467757bb39f6b6622c0121fe40f9f6fbcfd0b39..3ff577c0b1024af1ed0e0fc9f2805f5a8ba5e804 100644 (file)
        _ASM_PTR (entry);                                       \
        .popsection
 
-.macro ALIGN_DESTINATION
-       /* check for bad alignment of destination */
-       movl %edi,%ecx
-       andl $7,%ecx
-       jz 102f                         /* already aligned */
-       subl $8,%ecx
-       negl %ecx
-       subl %ecx,%edx
-100:   movb (%rsi),%al
-101:   movb %al,(%rdi)
-       incq %rsi
-       incq %rdi
-       decl %ecx
-       jnz 100b
-102:
-       .section .fixup,"ax"
-103:   addl %ecx,%edx                  /* ecx is zerorest also */
-       jmp copy_user_handle_tail
-       .previous
-
-       _ASM_EXTABLE_UA(100b, 103b)
-       _ASM_EXTABLE_UA(101b, 103b)
-       .endm
-
 #else
 # define _EXPAND_EXTABLE_HANDLE(x) #x
 # define _ASM_EXTABLE_HANDLE(from, to, handler)                        \
index d153d570bb04755d9fb106e3375db55dd3114fd7..8e790ec219a5fd5be0e812736ff7be167a5cd20e 100644 (file)
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
 
-#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
+#define RLONG_ADDR(x)                   "m" (*(volatile long *) (x))
+#define WBYTE_ADDR(x)                  "+m" (*(volatile char *) (x))
 
-#define ADDR                           BITOP_ADDR(addr)
+#define ADDR                           RLONG_ADDR(addr)
 
 /*
  * We do the locked ops that don't return the old value as
  * a mask operation on a byte.
  */
 #define IS_IMMEDIATE(nr)               (__builtin_constant_p(nr))
-#define CONST_MASK_ADDR(nr, addr)      BITOP_ADDR((void *)(addr) + ((nr)>>3))
+#define CONST_MASK_ADDR(nr, addr)      WBYTE_ADDR((void *)(addr) + ((nr)>>3))
 #define CONST_MASK(nr)                 (1 << ((nr) & 7))
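For a compile-time-constant bit number, the locked helpers here operate on a single byte instead of the whole long: byte nr >> 3 of the array, with mask 1 << (nr & 7). A stand-alone check that this byte-wise view selects the same bit a word-wise bts would on little-endian x86:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned long word = 0;
        int nr = 13;

        /* the byte-wise view used by CONST_MASK_ADDR()/CONST_MASK() */
        unsigned char *byte = (unsigned char *)&word + (nr >> 3);
        *byte |= 1 << (nr & 7);

        /* same bit the word-wise instruction would set (little endian) */
        assert(word == 1UL << nr);
        printf("word = %#lx\n", word);
        return 0;
}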
 
 /**
@@ -73,7 +74,7 @@ set_bit(long nr, volatile unsigned long *addr)
                        : "memory");
        } else {
                asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
-                       : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
+                       : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
        }
 }
 
@@ -88,7 +89,7 @@ set_bit(long nr, volatile unsigned long *addr)
  */
 static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
 {
-       asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
+       asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
 /**
@@ -110,8 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr)
                        : "iq" ((u8)~CONST_MASK(nr)));
        } else {
                asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
-                       : BITOP_ADDR(addr)
-                       : "Ir" (nr));
+                       : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
        }
 }
 
@@ -131,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad
 
 static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
 {
-       asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
+       asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
 static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
@@ -139,7 +139,7 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
        bool negative;
        asm volatile(LOCK_PREFIX "andb %2,%1"
                CC_SET(s)
-               : CC_OUT(s) (negative), ADDR
+               : CC_OUT(s) (negative), WBYTE_ADDR(addr)
                : "ir" ((char) ~(1 << nr)) : "memory");
        return negative;
 }
@@ -155,13 +155,9 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
  * __clear_bit() is non-atomic and implies release semantics before the memory
  * operation. It can be used for an unlock if no other CPUs can concurrently
  * modify other bits in the word.
- *
- * No memory barrier is required here, because x86 cannot reorder stores past
- * older loads. Same principle as spin_unlock.
  */
 static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
-       barrier();
        __clear_bit(nr, addr);
 }
 
@@ -176,7 +172,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *
  */
 static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
 {
-       asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
+       asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
 /**
@@ -196,8 +192,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
                        : "iq" ((u8)CONST_MASK(nr)));
        } else {
                asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
-                       : BITOP_ADDR(addr)
-                       : "Ir" (nr));
+                       : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
        }
 }
 
@@ -242,8 +237,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
 
        asm(__ASM_SIZE(bts) " %2,%1"
            CC_SET(c)
-           : CC_OUT(c) (oldbit), ADDR
-           : "Ir" (nr));
+           : CC_OUT(c) (oldbit)
+           : ADDR, "Ir" (nr) : "memory");
        return oldbit;
 }
 
@@ -282,8 +277,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
 
        asm volatile(__ASM_SIZE(btr) " %2,%1"
                     CC_SET(c)
-                    : CC_OUT(c) (oldbit), ADDR
-                    : "Ir" (nr));
+                    : CC_OUT(c) (oldbit)
+                    : ADDR, "Ir" (nr) : "memory");
        return oldbit;
 }
 
@@ -294,8 +289,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
 
        asm volatile(__ASM_SIZE(btc) " %2,%1"
                     CC_SET(c)
-                    : CC_OUT(c) (oldbit), ADDR
-                    : "Ir" (nr) : "memory");
+                    : CC_OUT(c) (oldbit)
+                    : ADDR, "Ir" (nr) : "memory");
 
        return oldbit;
 }
@@ -326,7 +321,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
        asm volatile(__ASM_SIZE(bt) " %2,%1"
                     CC_SET(c)
                     : CC_OUT(c) (oldbit)
-                    : "m" (*(unsigned long *)addr), "Ir" (nr));
+                    : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
 
        return oldbit;
 }
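The constraint change in this file drops the "+m" output operand in favour of a plain "m" input plus an explicit "memory" clobber: bts/btr/btc address bits relative to the base operand, so a large bit number reaches memory outside the nominal long, which a single output operand does not tell the compiler about. A small user-space demonstration of that addressing (x86-64, illustrative only, not kernel code):

#include <stdio.h>

/* bts addresses bits relative to the memory operand, so nr >= 64
 * lands outside the first long, hence the "memory" clobber. */
static inline void set_bit_demo(long nr, volatile unsigned long *addr)
{
        asm volatile("btsq %1,%0" : : "m" (*addr), "Ir" (nr) : "memory");
}

int main(void)
{
        unsigned long words[2] = { 0, 0 };

        set_bit_demo(65, words);        /* sets bit 1 of words[1] */
        printf("words[0]=%lx words[1]=%lx\n", words[0], words[1]);
        return 0;
}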
index 3417110574c12212f7185213bfe566ac2b541a1e..31c379c1da41c48b7f4ee89d3c100d62895ff5b2 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _CPU_DEVICE_ID
-#define _CPU_DEVICE_ID 1
+#ifndef _ASM_X86_CPU_DEVICE_ID
+#define _ASM_X86_CPU_DEVICE_ID
 
 /*
  * Declare drivers belonging to specific x86 CPUs
@@ -9,8 +9,6 @@
 
 #include <linux/mod_devicetable.h>
 
-extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
-
 /*
  * Match specific microcode revisions.
  *
@@ -22,21 +20,22 @@ extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
  */
 
 struct x86_cpu_desc {
-       __u8    x86_family;
-       __u8    x86_vendor;
-       __u8    x86_model;
-       __u8    x86_stepping;
-       __u32   x86_microcode_rev;
+       u8      x86_family;
+       u8      x86_vendor;
+       u8      x86_model;
+       u8      x86_stepping;
+       u32     x86_microcode_rev;
 };
 
-#define INTEL_CPU_DESC(mod, step, rev) {                       \
-       .x86_family = 6,                                        \
-       .x86_vendor = X86_VENDOR_INTEL,                         \
-       .x86_model = mod,                                       \
-       .x86_stepping = step,                                   \
-       .x86_microcode_rev = rev,                               \
+#define INTEL_CPU_DESC(model, stepping, revision) {            \
+       .x86_family             = 6,                            \
+       .x86_vendor             = X86_VENDOR_INTEL,             \
+       .x86_model              = (model),                      \
+       .x86_stepping           = (stepping),                   \
+       .x86_microcode_rev      = (revision),                   \
 }
 
+extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
 extern bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table);
 
-#endif
+#endif /* _ASM_X86_CPU_DEVICE_ID */
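A sketch of how the reshuffled header is meant to be consumed; the model, stepping and revision values below are placeholders, not a real quirk table:

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

/* Placeholder entries: the family is fixed to 6 by INTEL_CPU_DESC(). */
static const struct x86_cpu_desc example_ucodes[] = {
        INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x02000014),
        {},
};

static bool example_ucode_ok(void)
{
        /* True when the boot CPU matches an entry and its microcode
         * revision is at least the one listed. */
        return x86_cpu_has_min_microcode_rev(example_ucodes);
}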
index ce95b8cbd2296b1e33de2e0f520a00f3981e3f23..0e56ff7e484857a1fdd8673fdfa2e0b784e23cea 100644 (file)
@@ -112,8 +112,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
         test_cpu_cap(c, bit))
 
 #define this_cpu_has(bit)                                              \
-       (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :  \
-        x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability))
+       (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :  \
+        x86_this_cpu_test_bit(bit,                                     \
+               (unsigned long __percpu *)&cpu_info.x86_capability))
 
 /*
  * This macro is for detection of features which need kernel
index ae26df1c27896d20d25ba18555d14625b905077a..8380c3ddd4b2ee29ec5a9ca7a117b0f1501bc6f0 100644 (file)
@@ -8,7 +8,7 @@
 
 /* The maximal number of PEBS events: */
 #define MAX_PEBS_EVENTS                8
-#define MAX_FIXED_PEBS_EVENTS  3
+#define MAX_FIXED_PEBS_EVENTS  4
 
 /*
  * A debug store configuration.
index 93c4bf598fb06c7e53865141dd3e7faa514194ff..feab24cac610e25f276d3d1f71f4705c23106b00 100644 (file)
@@ -226,7 +226,9 @@ struct x86_emulate_ops {
 
        unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
        void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
-       int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt, u64 smbase);
+       int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt,
+                            const char *smstate);
+       void (*post_leave_smm)(struct x86_emulate_ctxt *ctxt);
 
 };
 
index a5db4475e72db63031284ecb986c445d7c835eb8..c79abe7ca093cf3c81f4de1938066426c8984f04 100644 (file)
@@ -126,7 +126,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 }
 
 #define KVM_PERMILLE_MMU_PAGES 20
-#define KVM_MIN_ALLOC_MMU_PAGES 64
+#define KVM_MIN_ALLOC_MMU_PAGES 64UL
 #define KVM_MMU_HASH_SHIFT 12
 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
 #define KVM_MIN_FREE_MMU_PAGES 5
@@ -253,14 +253,14 @@ struct kvm_mmu_memory_cache {
  * kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used
  * by indirect shadow page can not be more than 15 bits.
  *
- * Currently, we used 14 bits that are @level, @cr4_pae, @quadrant, @access,
+ * Currently, we use 14 bits that are @level, @gpte_is_8_bytes, @quadrant, @access,
  * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
  */
 union kvm_mmu_page_role {
        u32 word;
        struct {
                unsigned level:4;
-               unsigned cr4_pae:1;
+               unsigned gpte_is_8_bytes:1;
                unsigned quadrant:2;
                unsigned direct:1;
                unsigned access:3;
@@ -295,6 +295,7 @@ union kvm_mmu_extended_role {
                unsigned int valid:1;
                unsigned int execonly:1;
                unsigned int cr0_pg:1;
+               unsigned int cr4_pae:1;
                unsigned int cr4_pse:1;
                unsigned int cr4_pke:1;
                unsigned int cr4_smap:1;
@@ -350,6 +351,7 @@ struct kvm_mmu_page {
 };
 
 struct kvm_pio_request {
+       unsigned long linear_rip;
        unsigned long count;
        int in;
        int port;
@@ -568,6 +570,7 @@ struct kvm_vcpu_arch {
        bool tpr_access_reporting;
        u64 ia32_xss;
        u64 microcode_version;
+       u64 arch_capabilities;
 
        /*
         * Paging state of the vcpu
@@ -842,9 +845,9 @@ enum kvm_irqchip_mode {
 };
 
 struct kvm_arch {
-       unsigned int n_used_mmu_pages;
-       unsigned int n_requested_mmu_pages;
-       unsigned int n_max_mmu_pages;
+       unsigned long n_used_mmu_pages;
+       unsigned long n_requested_mmu_pages;
+       unsigned long n_max_mmu_pages;
        unsigned int indirect_shadow_pages;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        /*
@@ -1180,7 +1183,7 @@ struct kvm_x86_ops {
 
        int (*smi_allowed)(struct kvm_vcpu *vcpu);
        int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
-       int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
+       int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
        int (*enable_smi_window)(struct kvm_vcpu *vcpu);
 
        int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
@@ -1192,6 +1195,8 @@ struct kvm_x86_ops {
        int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
                                   uint16_t *vmcs_version);
        uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);
+
+       bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_arch_async_pf {
@@ -1252,8 +1257,8 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                   gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
+unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
 
 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
 bool pdptrs_changed(struct kvm_vcpu *vcpu);
@@ -1588,4 +1593,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 #define put_smstate(type, buf, offset, val)                      \
        *(type *)((buf) + (offset) - 0x7e00) = val
 
+#define GET_SMSTATE(type, buf, offset)         \
+       (*(type *)((buf) + (offset) - 0x7e00))
+
 #endif /* _ASM_X86_KVM_HOST_H */
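GET_SMSTATE() mirrors put_smstate() for the new const char *smstate flow; a sketch of reading a field back out of a 64-bit SMM state-save area, where 0x7f78 as the saved-RIP slot is illustrative:

/* Sketch: offsets are relative to 0x7e00, exactly as in put_smstate(). */
static u64 example_saved_rip(const char *smstate)
{
        return GET_SMSTATE(u64, smstate, 0x7f78);
}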
index ca5bc0eacb95f56b144a2990b396520f51e0e8bb..1378518cf63ffe6980df592847e8c082e6d4bebb 100644 (file)
 #define LBR_INFO_CYCLES                        0xffff
 
 #define MSR_IA32_PEBS_ENABLE           0x000003f1
+#define MSR_PEBS_DATA_CFG              0x000003f2
 #define MSR_IA32_DS_AREA               0x00000600
 #define MSR_IA32_PERF_CAPABILITIES     0x00000345
 #define MSR_PEBS_LD_LAT_THRESHOLD      0x000003f6
index dad12b767ba069ede01be842e25a5d04afa35297..daf25b60c9e3a5ff6b83ee80028689b6449e85d2 100644 (file)
 #include <asm/cpufeatures.h>
 #include <asm/msr-index.h>
 
+/*
+ * This should be used immediately before a retpoline alternative. It tells
+ * objtool where the retpolines are so that it can make sense of the control
+ * flow by just reading the original instruction(s) and ignoring the
+ * alternatives.
+ */
+#define ANNOTATE_NOSPEC_ALTERNATIVE \
+       ANNOTATE_IGNORE_ALTERNATIVE
+
 /*
  * Fill the CPU return stack buffer.
  *
 
 #ifdef __ASSEMBLY__
 
-/*
- * This should be used immediately before a retpoline alternative.  It tells
- * objtool where the retpolines are so that it can make sense of the control
- * flow by just reading the original instruction(s) and ignoring the
- * alternatives.
- */
-.macro ANNOTATE_NOSPEC_ALTERNATIVE
-       .Lannotate_\@:
-       .pushsection .discard.nospec
-       .long .Lannotate_\@ - .
-       .popsection
-.endm
-
 /*
  * This should be used immediately before an indirect jump/call. It tells
  * objtool the subsequent indirect jump/call is vouched safe for retpoline
 
 #else /* __ASSEMBLY__ */
 
-#define ANNOTATE_NOSPEC_ALTERNATIVE                            \
-       "999:\n\t"                                              \
-       ".pushsection .discard.nospec\n\t"                      \
-       ".long 999b - .\n\t"                                    \
-       ".popsection\n\t"
-
 #define ANNOTATE_RETPOLINE_SAFE                                        \
        "999:\n\t"                                              \
        ".pushsection .discard.retpoline_safe\n\t"              \
index 8bdf74902293489a031aa300a605447e83b96341..1392d5e6e8d671fe7d503646399c193dfce2dafa 100644 (file)
@@ -7,7 +7,7 @@
  */
 
 #define INTEL_PMC_MAX_GENERIC                                 32
-#define INTEL_PMC_MAX_FIXED                                    3
+#define INTEL_PMC_MAX_FIXED                                    4
 #define INTEL_PMC_IDX_FIXED                                   32
 
 #define X86_PMC_IDX_MAX                                               64
@@ -32,6 +32,8 @@
 
 #define HSW_IN_TX                                      (1ULL << 32)
 #define HSW_IN_TX_CHECKPOINTED                         (1ULL << 33)
+#define ICL_EVENTSEL_ADAPTIVE                          (1ULL << 34)
+#define ICL_FIXED_0_ADAPTIVE                           (1ULL << 32)
 
 #define AMD64_EVENTSEL_INT_CORE_ENABLE                 (1ULL << 36)
 #define AMD64_EVENTSEL_GUESTONLY                       (1ULL << 40)
 #define ARCH_PERFMON_BRANCH_MISSES_RETIRED             6
 #define ARCH_PERFMON_EVENTS_COUNT                      7
 
+#define PEBS_DATACFG_MEMINFO   BIT_ULL(0)
+#define PEBS_DATACFG_GP        BIT_ULL(1)
+#define PEBS_DATACFG_XMMS      BIT_ULL(2)
+#define PEBS_DATACFG_LBRS      BIT_ULL(3)
+#define PEBS_DATACFG_LBR_SHIFT 24
+
 /*
  * Intel "Architectural Performance Monitoring" CPUID
  * detection/enumeration details:
@@ -176,6 +184,41 @@ struct x86_pmu_capability {
 #define GLOBAL_STATUS_LBRS_FROZEN                      BIT_ULL(58)
 #define GLOBAL_STATUS_TRACE_TOPAPMI                    BIT_ULL(55)
 
+/*
+ * Adaptive PEBS v4
+ */
+
+struct pebs_basic {
+       u64 format_size;
+       u64 ip;
+       u64 applicable_counters;
+       u64 tsc;
+};
+
+struct pebs_meminfo {
+       u64 address;
+       u64 aux;
+       u64 latency;
+       u64 tsx_tuning;
+};
+
+struct pebs_gprs {
+       u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
+       u64 r8, r9, r10, r11, r12, r13, r14, r15;
+};
+
+struct pebs_xmm {
+       u64 xmm[16*2];  /* two entries for each register */
+};
+
+struct pebs_lbr_entry {
+       u64 from, to, info;
+};
+
+struct pebs_lbr {
+       struct pebs_lbr_entry lbr[0]; /* Variable length */
+};
+
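Combined with the PEBS_DATACFG_* bits above, a record can be walked group by group. The sketch below assumes the groups appear in basic/meminfo/GPR/XMM/LBR order and that the LBR-entry count can be recovered from the same data_cfg value that programmed MSR_PEBS_DATA_CFG; both are assumptions for illustration.

/* Sketch only: layout order and the LBR count encoding are assumed. */
static void pebs_walk_sketch(void *rec, u64 data_cfg)
{
        struct pebs_basic *basic = rec;
        void *next = basic + 1;

        if (data_cfg & PEBS_DATACFG_MEMINFO)
                next = (struct pebs_meminfo *)next + 1;
        if (data_cfg & PEBS_DATACFG_GP)
                next = (struct pebs_gprs *)next + 1;
        if (data_cfg & PEBS_DATACFG_XMMS)
                next = (struct pebs_xmm *)next + 1;
        if (data_cfg & PEBS_DATACFG_LBRS) {
                struct pebs_lbr *lbr = next;
                u64 nr = (data_cfg >> PEBS_DATACFG_LBR_SHIFT) & 0xff;

                /* nr is assumed to give the number of pebs_lbr_entry
                 * records in lbr->lbr[]. */
                (void)lbr;
                (void)nr;
        }
        (void)next;
}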
 /*
  * IBS cpuid feature detection
  */
@@ -248,6 +291,11 @@ extern void perf_events_lapic_init(void);
 #define PERF_EFLAGS_VM         (1UL << 5)
 
 struct pt_regs;
+struct x86_perf_regs {
+       struct pt_regs  regs;
+       u64             *xmm_regs;
+};
+
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 #define perf_misc_flags(regs)  perf_misc_flags(regs)
@@ -260,14 +308,9 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
  */
 #define perf_arch_fetch_caller_regs(regs, __ip)                {       \
        (regs)->ip = (__ip);                                    \
-       (regs)->bp = caller_frame_pointer();                    \
+       (regs)->sp = (unsigned long)__builtin_frame_address(0); \
        (regs)->cs = __KERNEL_CS;                               \
        regs->flags = 0;                                        \
-       asm volatile(                                           \
-               _ASM_MOV "%%"_ASM_SP ", %0\n"                   \
-               : "=m" ((regs)->sp)                             \
-               :: "memory"                                     \
-       );                                                      \
 }
 
 struct perf_guest_switch_msr {
index 2779ace16d23f21d5cb7b65faf87f384b3b05268..50b3e2d963c9a533efb250f6558a6e916feb57c2 100644 (file)
@@ -46,7 +46,7 @@ void ptdump_walk_user_pgd_level_checkwx(void);
  */
 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
        __visible;
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+#define ZERO_PAGE(vaddr) ((void)(vaddr), virt_to_page(empty_zero_page))
 
 extern spinlock_t pgd_lock;
 extern struct list_head pgd_list;
index aaedd73ea2c66a21c2456e8519841ebd6c56b18e..df700a6cc869bb6309bc1069e3e3c030b47281f7 100644 (file)
@@ -3,19 +3,6 @@
  * NSC/Cyrix CPU indexed register access. Must be inlined instead of
  * macros to ensure correct access ordering
  * Access order is always 0x22 (=offset), 0x23 (=value)
- *
- * When using the old macros a line like
- *   setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
- * gets expanded to:
- *  do {
- *    outb((CX86_CCR2), 0x22);
- *    outb((({
- *        outb((CX86_CCR2), 0x22);
- *        inb(0x23);
- *    }) | 0x88), 0x23);
- *  } while (0);
- *
- * which in fact violates the access order (= 0x22, 0x22, 0x23, 0x23).
  */
 
 static inline u8 getCx86(u8 reg)
@@ -29,11 +16,3 @@ static inline void setCx86(u8 reg, u8 data)
        outb(reg, 0x22);
        outb(data, 0x23);
 }
-
-#define getCx86_old(reg) ({ outb((reg), 0x22); inb(0x23); })
-
-#define setCx86_old(reg, data) do { \
-       outb((reg), 0x22); \
-       outb((data), 0x23); \
-} while (0)
-
index 63b3393bd98ea2caaa67aefeeab359cbac72dc51..c53682303c9c1252a79d90cf8dd19a96d126db93 100644 (file)
@@ -77,7 +77,11 @@ static inline size_t real_mode_size_needed(void)
        return ALIGN(real_mode_blob_end - real_mode_blob, PAGE_SIZE);
 }
 
-void set_real_mode_mem(phys_addr_t mem, size_t size);
+static inline void set_real_mode_mem(phys_addr_t mem)
+{
+       real_mode_header = (struct real_mode_header *) __va(mem);
+}
+
 void reserve_real_mode(void);
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
deleted file mode 100644 (file)
index 4c25cf6..0000000
+++ /dev/null
@@ -1,237 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+
- *
- * Written by David Howells (dhowells@redhat.com).
- *
- * Derived from asm-x86/semaphore.h
- *
- *
- * The MSW of the count is the negated number of active writers and waiting
- * lockers, and the LSW is the total number of active locks
- *
- * The lock count is initialized to 0 (no active and no waiting lockers).
- *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
- * uncontended lock. This can be determined because XADD returns the old value.
- * Readers increment by 1 and see a positive value when uncontended, negative
- * if there are writers (and maybe) readers waiting (in which case it goes to
- * sleep).
- *
- * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
- * be extended to 65534 by manually checking the whole MSW rather than relying
- * on the S flag.
- *
- * The value of ACTIVE_BIAS supports up to 65535 active processes.
- *
- * This should be totally fair - if anything is waiting, a process that wants a
- * lock will go to the back of the queue. When the currently active lock is
- * released, if there's a writer at the front of the queue, then that and only
- * that will be woken up; if there's a bunch of consecutive readers at the
- * front, then they'll all be woken up, but no other readers will be.
- */
-
-#ifndef _ASM_X86_RWSEM_H
-#define _ASM_X86_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-#include <asm/asm.h>
-
-/*
- * The bias values and the counter type limits the number of
- * potential readers/writers to 32767 for 32 bits and 2147483647
- * for 64 bits.
- */
-
-#ifdef CONFIG_X86_64
-# define RWSEM_ACTIVE_MASK             0xffffffffL
-#else
-# define RWSEM_ACTIVE_MASK             0x0000ffffL
-#endif
-
-#define RWSEM_UNLOCKED_VALUE           0x00000000L
-#define RWSEM_ACTIVE_BIAS              0x00000001L
-#define RWSEM_WAITING_BIAS             (-RWSEM_ACTIVE_MASK-1)
-#define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-#define ____down_read(sem, slow_path)                                  \
-({                                                                     \
-       struct rw_semaphore* ret;                                       \
-       asm volatile("# beginning down_read\n\t"                        \
-                    LOCK_PREFIX _ASM_INC "(%[sem])\n\t"                \
-                    /* adds 0x00000001 */                              \
-                    "  jns        1f\n"                                \
-                    "  call " slow_path "\n"                           \
-                    "1:\n\t"                                           \
-                    "# ending down_read\n\t"                           \
-                    : "+m" (sem->count), "=a" (ret),                   \
-                       ASM_CALL_CONSTRAINT                             \
-                    : [sem] "a" (sem)                                  \
-                    : "memory", "cc");                                 \
-       ret;                                                            \
-})
-
-static inline void __down_read(struct rw_semaphore *sem)
-{
-       ____down_read(sem, "call_rwsem_down_read_failed");
-}
-
-static inline int __down_read_killable(struct rw_semaphore *sem)
-{
-       if (IS_ERR(____down_read(sem, "call_rwsem_down_read_failed_killable")))
-               return -EINTR;
-       return 0;
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-static inline bool __down_read_trylock(struct rw_semaphore *sem)
-{
-       long result, tmp;
-       asm volatile("# beginning __down_read_trylock\n\t"
-                    "  mov          %[count],%[result]\n\t"
-                    "1:\n\t"
-                    "  mov          %[result],%[tmp]\n\t"
-                    "  add          %[inc],%[tmp]\n\t"
-                    "  jle          2f\n\t"
-                    LOCK_PREFIX "  cmpxchg  %[tmp],%[count]\n\t"
-                    "  jnz          1b\n\t"
-                    "2:\n\t"
-                    "# ending __down_read_trylock\n\t"
-                    : [count] "+m" (sem->count), [result] "=&a" (result),
-                      [tmp] "=&r" (tmp)
-                    : [inc] "i" (RWSEM_ACTIVE_READ_BIAS)
-                    : "memory", "cc");
-       return result >= 0;
-}
-
-/*
- * lock for writing
- */
-#define ____down_write(sem, slow_path)                 \
-({                                                     \
-       long tmp;                                       \
-       struct rw_semaphore* ret;                       \
-                                                       \
-       asm volatile("# beginning down_write\n\t"       \
-                    LOCK_PREFIX "  xadd      %[tmp],(%[sem])\n\t"      \
-                    /* adds 0xffff0001, returns the old value */ \
-                    "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
-                    /* was the active mask 0 before? */\
-                    "  jz        1f\n"                 \
-                    "  call " slow_path "\n"           \
-                    "1:\n"                             \
-                    "# ending down_write"              \
-                    : "+m" (sem->count), [tmp] "=d" (tmp),     \
-                      "=a" (ret), ASM_CALL_CONSTRAINT  \
-                    : [sem] "a" (sem), "[tmp]" (RWSEM_ACTIVE_WRITE_BIAS) \
-                    : "memory", "cc");                 \
-       ret;                                            \
-})
-
-static inline void __down_write(struct rw_semaphore *sem)
-{
-       ____down_write(sem, "call_rwsem_down_write_failed");
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
-{
-       if (IS_ERR(____down_write(sem, "call_rwsem_down_write_failed_killable")))
-               return -EINTR;
-
-       return 0;
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-static inline bool __down_write_trylock(struct rw_semaphore *sem)
-{
-       bool result;
-       long tmp0, tmp1;
-       asm volatile("# beginning __down_write_trylock\n\t"
-                    "  mov          %[count],%[tmp0]\n\t"
-                    "1:\n\t"
-                    "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
-                    /* was the active mask 0 before? */
-                    "  jnz          2f\n\t"
-                    "  mov          %[tmp0],%[tmp1]\n\t"
-                    "  add          %[inc],%[tmp1]\n\t"
-                    LOCK_PREFIX "  cmpxchg  %[tmp1],%[count]\n\t"
-                    "  jnz          1b\n\t"
-                    "2:\n\t"
-                    CC_SET(e)
-                    "# ending __down_write_trylock\n\t"
-                    : [count] "+m" (sem->count), [tmp0] "=&a" (tmp0),
-                      [tmp1] "=&r" (tmp1), CC_OUT(e) (result)
-                    : [inc] "er" (RWSEM_ACTIVE_WRITE_BIAS)
-                    : "memory");
-       return result;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-       long tmp;
-       asm volatile("# beginning __up_read\n\t"
-                    LOCK_PREFIX "  xadd      %[tmp],(%[sem])\n\t"
-                    /* subtracts 1, returns the old value */
-                    "  jns        1f\n\t"
-                    "  call call_rwsem_wake\n" /* expects old value in %edx */
-                    "1:\n"
-                    "# ending __up_read\n"
-                    : "+m" (sem->count), [tmp] "=d" (tmp)
-                    : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_READ_BIAS)
-                    : "memory", "cc");
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-       long tmp;
-       asm volatile("# beginning __up_write\n\t"
-                    LOCK_PREFIX "  xadd      %[tmp],(%[sem])\n\t"
-                    /* subtracts 0xffff0001, returns the old value */
-                    "  jns        1f\n\t"
-                    "  call call_rwsem_wake\n" /* expects old value in %edx */
-                    "1:\n\t"
-                    "# ending __up_write\n"
-                    : "+m" (sem->count), [tmp] "=d" (tmp)
-                    : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_WRITE_BIAS)
-                    : "memory", "cc");
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-       asm volatile("# beginning __downgrade_write\n\t"
-                    LOCK_PREFIX _ASM_ADD "%[inc],(%[sem])\n\t"
-                    /*
-                     * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
-                     *     0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
-                     */
-                    "  jns       1f\n\t"
-                    "  call call_rwsem_downgrade_wake\n"
-                    "1:\n\t"
-                    "# ending __downgrade_write\n"
-                    : "+m" (sem->count)
-                    : [sem] "a" (sem), [inc] "er" (-RWSEM_WAITING_BIAS)
-                    : "memory", "cc");
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_X86_RWSEM_H */
index db333300bd4be17205daf3b2e820c0af101ccc2d..f94a7d0ddd490e19a168cb7404a4a0cbda2e7d28 100644 (file)
 #ifndef _ASM_X86_SMAP_H
 #define _ASM_X86_SMAP_H
 
-#include <linux/stringify.h>
 #include <asm/nops.h>
 #include <asm/cpufeatures.h>
 
 /* "Raw" instruction opcodes */
-#define __ASM_CLAC     .byte 0x0f,0x01,0xca
-#define __ASM_STAC     .byte 0x0f,0x01,0xcb
+#define __ASM_CLAC     ".byte 0x0f,0x01,0xca"
+#define __ASM_STAC     ".byte 0x0f,0x01,0xcb"
 
 #ifdef __ASSEMBLY__
 
 #ifdef CONFIG_X86_SMAP
 
 #define ASM_CLAC \
-       ALTERNATIVE "", __stringify(__ASM_CLAC), X86_FEATURE_SMAP
+       ALTERNATIVE "", __ASM_CLAC, X86_FEATURE_SMAP
 
 #define ASM_STAC \
-       ALTERNATIVE "", __stringify(__ASM_STAC), X86_FEATURE_SMAP
+       ALTERNATIVE "", __ASM_STAC, X86_FEATURE_SMAP
 
 #else /* CONFIG_X86_SMAP */
 
 static __always_inline void clac(void)
 {
        /* Note: a barrier is implicit in alternative() */
-       alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
+       alternative("", __ASM_CLAC, X86_FEATURE_SMAP);
 }
 
 static __always_inline void stac(void)
 {
        /* Note: a barrier is implicit in alternative() */
-       alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
+       alternative("", __ASM_STAC, X86_FEATURE_SMAP);
+}
+
+static __always_inline unsigned long smap_save(void)
+{
+       unsigned long flags;
+
+       asm volatile (ALTERNATIVE("", "pushf; pop %0; " __ASM_CLAC,
+                                 X86_FEATURE_SMAP)
+                     : "=rm" (flags) : : "memory", "cc");
+
+       return flags;
+}
+
+static __always_inline void smap_restore(unsigned long flags)
+{
+       asm volatile (ALTERNATIVE("", "push %0; popf", X86_FEATURE_SMAP)
+                     : : "g" (flags) : "memory", "cc");
 }
 
 /* These macros can be used in asm() statements */
 #define ASM_CLAC \
-       ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
+       ALTERNATIVE("", __ASM_CLAC, X86_FEATURE_SMAP)
 #define ASM_STAC \
-       ALTERNATIVE("", __stringify(__ASM_STAC), X86_FEATURE_SMAP)
+       ALTERNATIVE("", __ASM_STAC, X86_FEATURE_SMAP)
 
 #else /* CONFIG_X86_SMAP */
 
 static inline void clac(void) { }
 static inline void stac(void) { }
 
+static inline unsigned long smap_save(void) { return 0; }
+static inline void smap_restore(unsigned long flags) { }
+
 #define ASM_CLAC
 #define ASM_STAC
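smap_save()/smap_restore() bracket code that must not run with EFLAGS.AC set: the save reads EFLAGS and then executes CLAC, and the restore puts the saved flags back. A sketch of the intended pattern, with a hypothetical call-out; the uaccess.h hunk below wires these up as user_access_save()/user_access_restore():

static void example_ac_callout(void)
{
        unsigned long flags;

        flags = smap_save();               /* save EFLAGS, then CLAC */
        hypothetical_instrumented_call();  /* must not run with AC=1 */
        smap_restore(flags);               /* restore EFLAGS.AC */
}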
 
index f335aad404a479e98e4a5d38dc41ee5e5aa419ec..beef7ad9e43a4a7391aad3db4231de806f4799d6 100644 (file)
@@ -98,19 +98,6 @@ struct stack_frame_ia32 {
     u32 return_address;
 };
 
-static inline unsigned long caller_frame_pointer(void)
-{
-       struct stack_frame *frame;
-
-       frame = __builtin_frame_address(0);
-
-#ifdef CONFIG_FRAME_POINTER
-       frame = frame->next_frame;
-#endif
-
-       return (unsigned long)frame;
-}
-
 void show_opcodes(struct pt_regs *regs, const char *loglvl);
 void show_ip(struct pt_regs *regs, const char *loglvl);
 #endif /* _ASM_X86_STACKTRACE_H */
index 7cf1a270d89101822da3c9390f4e1f112258939e..18a4b6890fa82f589b9609ce1e509574a5411bf5 100644 (file)
@@ -46,6 +46,7 @@ struct inactive_task_frame {
        unsigned long r13;
        unsigned long r12;
 #else
+       unsigned long flags;
        unsigned long si;
        unsigned long di;
 #endif
index d653139857af2a1121f877b611c8d56d4e4690f0..4c305471ec3312e3b0adc7063c30e9d3edf2de7f 100644 (file)
@@ -91,11 +91,9 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
-       BUG_ON(i + n > 6);
-       memcpy(args, &regs->bx + i, n * sizeof(args[0]));
+       memcpy(args, &regs->bx, 6 * sizeof(args[0]));
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
@@ -116,124 +114,50 @@ static inline int syscall_get_arch(void)
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
 # ifdef CONFIG_IA32_EMULATION
-       if (task->thread_info.status & TS_COMPAT)
-               switch (i) {
-               case 0:
-                       if (!n--) break;
-                       *args++ = regs->bx;
-               case 1:
-                       if (!n--) break;
-                       *args++ = regs->cx;
-               case 2:
-                       if (!n--) break;
-                       *args++ = regs->dx;
-               case 3:
-                       if (!n--) break;
-                       *args++ = regs->si;
-               case 4:
-                       if (!n--) break;
-                       *args++ = regs->di;
-               case 5:
-                       if (!n--) break;
-                       *args++ = regs->bp;
-               case 6:
-                       if (!n--) break;
-               default:
-                       BUG();
-                       break;
-               }
-       else
+       if (task->thread_info.status & TS_COMPAT) {
+               *args++ = regs->bx;
+               *args++ = regs->cx;
+               *args++ = regs->dx;
+               *args++ = regs->si;
+               *args++ = regs->di;
+               *args   = regs->bp;
+       } else
 # endif
-               switch (i) {
-               case 0:
-                       if (!n--) break;
-                       *args++ = regs->di;
-               case 1:
-                       if (!n--) break;
-                       *args++ = regs->si;
-               case 2:
-                       if (!n--) break;
-                       *args++ = regs->dx;
-               case 3:
-                       if (!n--) break;
-                       *args++ = regs->r10;
-               case 4:
-                       if (!n--) break;
-                       *args++ = regs->r8;
-               case 5:
-                       if (!n--) break;
-                       *args++ = regs->r9;
-               case 6:
-                       if (!n--) break;
-               default:
-                       BUG();
-                       break;
-               }
+       {
+               *args++ = regs->di;
+               *args++ = regs->si;
+               *args++ = regs->dx;
+               *args++ = regs->r10;
+               *args++ = regs->r8;
+               *args   = regs->r9;
+       }
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
 # ifdef CONFIG_IA32_EMULATION
-       if (task->thread_info.status & TS_COMPAT)
-               switch (i) {
-               case 0:
-                       if (!n--) break;
-                       regs->bx = *args++;
-               case 1:
-                       if (!n--) break;
-                       regs->cx = *args++;
-               case 2:
-                       if (!n--) break;
-                       regs->dx = *args++;
-               case 3:
-                       if (!n--) break;
-                       regs->si = *args++;
-               case 4:
-                       if (!n--) break;
-                       regs->di = *args++;
-               case 5:
-                       if (!n--) break;
-                       regs->bp = *args++;
-               case 6:
-                       if (!n--) break;
-               default:
-                       BUG();
-                       break;
-               }
-       else
+       if (task->thread_info.status & TS_COMPAT) {
+               regs->bx = *args++;
+               regs->cx = *args++;
+               regs->dx = *args++;
+               regs->si = *args++;
+               regs->di = *args++;
+               regs->bp = *args;
+       } else
 # endif
-               switch (i) {
-               case 0:
-                       if (!n--) break;
-                       regs->di = *args++;
-               case 1:
-                       if (!n--) break;
-                       regs->si = *args++;
-               case 2:
-                       if (!n--) break;
-                       regs->dx = *args++;
-               case 3:
-                       if (!n--) break;
-                       regs->r10 = *args++;
-               case 4:
-                       if (!n--) break;
-                       regs->r8 = *args++;
-               case 5:
-                       if (!n--) break;
-                       regs->r9 = *args++;
-               case 6:
-                       if (!n--) break;
-               default:
-                       BUG();
-                       break;
-               }
+       {
+               regs->di = *args++;
+               regs->si = *args++;
+               regs->dx = *args++;
+               regs->r10 = *args++;
+               regs->r8 = *args++;
+               regs->r9 = *args;
+       }
 }
 
 static inline int syscall_get_arch(void)
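With the i/n parameters gone, callers always receive all six arguments; a sketch of a caller after this change:

/* Sketch: the destination must now always hold six entries. */
static void example_dump_syscall_args(struct task_struct *task,
                                      struct pt_regs *regs)
{
        unsigned long args[6];

        syscall_get_arguments(task, regs, args);
        pr_debug("arg0=%lx arg5=%lx\n", args[0], args[5]);
}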
index 404b8b1d44f5899bb2db788a5aee7c588445748d..f23e7aaff4cd0914517d2b76bcfadb0cf9c70d1d 100644 (file)
@@ -6,6 +6,7 @@
 #define tlb_end_vma(tlb, vma) do { } while (0)
 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 
+#define tlb_flush tlb_flush
 static inline void tlb_flush(struct mmu_gather *tlb);
 
 #include <asm-generic/tlb.h>
index 1954dd5552a2e2fbeaf21937ad4c6d98c6ba0aff..bb21913885a35eb29ca5816c2dd89817fb109a09 100644 (file)
@@ -427,10 +427,11 @@ do {                                                                      \
 ({                                                             \
        __label__ __pu_label;                                   \
        int __pu_err = -EFAULT;                                 \
-       __typeof__(*(ptr)) __pu_val;                            \
-       __pu_val = x;                                           \
+       __typeof__(*(ptr)) __pu_val = (x);                      \
+       __typeof__(ptr) __pu_ptr = (ptr);                       \
+       __typeof__(size) __pu_size = (size);                    \
        __uaccess_begin();                                      \
-       __put_user_size(__pu_val, (ptr), (size), __pu_label);   \
+       __put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label);     \
        __pu_err = 0;                                           \
 __pu_label:                                                    \
        __uaccess_end();                                        \
@@ -705,7 +706,7 @@ extern struct movsl_mask {
  * checking before using them, but you have to surround them with the
  * user_access_begin/end() pair.
  */
-static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
+static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
 {
        if (unlikely(!access_ok(ptr,len)))
                return 0;
@@ -715,6 +716,9 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
 #define user_access_begin(a,b) user_access_begin(a,b)
 #define user_access_end()      __uaccess_end()
 
+#define user_access_save()     smap_save()
+#define user_access_restore(x) smap_restore(x)
+
 #define unsafe_put_user(x, ptr, label) \
        __put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
 
index a9d637bc301d7dd0086b5126a5ebac8f042c62c9..5cd1caa8bc6537c8795218581118c60552128ad8 100644 (file)
@@ -207,9 +207,6 @@ __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
        return __copy_user_flushcache(dst, src, size);
 }
 
-unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len);
-
 unsigned long
 mcsafe_handle_tail(char *to, char *from, unsigned len);
 
index de6f0d59a24f418febf72e40dd595e41dcb3c7c0..d50c7b747d8b879182cee633b22e3809b66af2b6 100644 (file)
@@ -206,6 +206,9 @@ xen_single_call(unsigned int call,
        __HYPERCALL_DECLS;
        __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
 
+       if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
+               return -EINVAL;
+
        asm volatile(CALL_NOSPEC
                     : __HYPERCALL_5PARAM
                     : [thunk_target] "a" (&hypercall_page[call])
@@ -214,6 +217,22 @@ xen_single_call(unsigned int call,
        return (long)__res;
 }
 
+static __always_inline void __xen_stac(void)
+{
+       /*
+        * Keep objtool from seeing the STAC/CLAC and getting confused
+        * about the asm calling random code with AC=1.
+        */
+       asm volatile(ANNOTATE_IGNORE_ALTERNATIVE
+                    ASM_STAC ::: "memory", "flags");
+}
+
+static __always_inline void __xen_clac(void)
+{
+       asm volatile(ANNOTATE_IGNORE_ALTERNATIVE
+                    ASM_CLAC ::: "memory", "flags");
+}
+
 static inline long
 privcmd_call(unsigned int call,
             unsigned long a1, unsigned long a2,
@@ -222,9 +241,9 @@ privcmd_call(unsigned int call,
 {
        long res;
 
-       stac();
+       __xen_stac();
        res = xen_single_call(call, a1, a2, a3, a4, a5);
-       clac();
+       __xen_clac();
 
        return res;
 }
@@ -421,9 +440,9 @@ HYPERVISOR_dm_op(
        domid_t dom, unsigned int nr_bufs, struct xen_dm_op_buf *bufs)
 {
        int ret;
-       stac();
+       __xen_stac();
        ret = _hypercall3(int, dm_op, dom, nr_bufs, bufs);
-       clac();
+       __xen_clac();
        return ret;
 }
 
index dabfcf7c3941aa90a92a91ee37f1164447c71655..7a0e64ccd6ff5d02108a4424fc72a36f49018987 100644 (file)
@@ -381,6 +381,7 @@ struct kvm_sync_regs {
 #define KVM_X86_QUIRK_LINT0_REENABLED  (1 << 0)
 #define KVM_X86_QUIRK_CD_NW_CLEARED    (1 << 1)
 #define KVM_X86_QUIRK_LAPIC_MMIO_HOLE  (1 << 2)
+#define KVM_X86_QUIRK_OUT_7E_INC_RIP   (1 << 3)
 
 #define KVM_STATE_NESTED_GUEST_MODE    0x00000001
 #define KVM_STATE_NESTED_RUN_PENDING   0x00000002
index f3329cabce5c6d9e7c605a0fb46f764e2d643141..ac67bbea10cae36848ff0be197c40a3a7af7c0f6 100644 (file)
@@ -27,8 +27,29 @@ enum perf_event_x86_regs {
        PERF_REG_X86_R13,
        PERF_REG_X86_R14,
        PERF_REG_X86_R15,
-
+       /* These are the limits for the GPRs. */
        PERF_REG_X86_32_MAX = PERF_REG_X86_GS + 1,
        PERF_REG_X86_64_MAX = PERF_REG_X86_R15 + 1,
+
+       /* These all need two bits set because they are 128 bits wide */
+       PERF_REG_X86_XMM0  = 32,
+       PERF_REG_X86_XMM1  = 34,
+       PERF_REG_X86_XMM2  = 36,
+       PERF_REG_X86_XMM3  = 38,
+       PERF_REG_X86_XMM4  = 40,
+       PERF_REG_X86_XMM5  = 42,
+       PERF_REG_X86_XMM6  = 44,
+       PERF_REG_X86_XMM7  = 46,
+       PERF_REG_X86_XMM8  = 48,
+       PERF_REG_X86_XMM9  = 50,
+       PERF_REG_X86_XMM10 = 52,
+       PERF_REG_X86_XMM11 = 54,
+       PERF_REG_X86_XMM12 = 56,
+       PERF_REG_X86_XMM13 = 58,
+       PERF_REG_X86_XMM14 = 60,
+       PERF_REG_X86_XMM15 = 62,
+
+       /* These include both GPRs and XMM registers */
+       PERF_REG_X86_XMM_MAX = PERF_REG_X86_XMM15 + 2,
 };
 #endif /* _ASM_X86_PERF_REGS_H */
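Because each XMM register is 128 bits wide it owns two consecutive mask bits, so requesting one register sets a pair of bits in the sample mask; a sketch using the perf uapi fields:

/* Sketch: sample XMM0 on interrupt; two mask bits cover its 128 bits. */
static void example_request_xmm0(struct perf_event_attr *attr)
{
        attr->sample_type      |= PERF_SAMPLE_REGS_INTR;
        attr->sample_regs_intr |= 3ULL << PERF_REG_X86_XMM0;
}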
index f0b0c90dd398246eb2882050d69c6b53ccca11af..d213ec5c3766db0dd5176c951b13e5f3c1514cfb 100644 (file)
 
 #define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
 #define VMX_ABORT_LOAD_HOST_PDPTE_FAIL       2
+#define VMX_ABORT_VMCS_CORRUPTED             3
 #define VMX_ABORT_LOAD_HOST_MSR_FAIL         4
 
 #endif /* _UAPIVMX_H */
index 58176b56354e4977ff20fb6140ef119f912f9fb8..294ed4392a0ecd965b6b527ba499d3c1be1d1fce 100644 (file)
@@ -14,6 +14,7 @@
 #define pr_fmt(fmt) "AGP: " fmt
 
 #include <linux/kernel.h>
+#include <linux/kcore.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/memblock.h>
@@ -57,7 +58,7 @@ int fallback_aper_force __initdata;
 
 int fix_aperture __initdata = 1;
 
-#ifdef CONFIG_PROC_VMCORE
+#if defined(CONFIG_PROC_VMCORE) || defined(CONFIG_PROC_KCORE)
 /*
  * If the first kernel maps the aperture over e820 RAM, the kdump kernel will
  * use the same range because it will remain configured in the northbridge.
@@ -66,20 +67,25 @@ int fix_aperture __initdata = 1;
  */
 static unsigned long aperture_pfn_start, aperture_page_count;
 
-static int gart_oldmem_pfn_is_ram(unsigned long pfn)
+static int gart_mem_pfn_is_ram(unsigned long pfn)
 {
        return likely((pfn < aperture_pfn_start) ||
                      (pfn >= aperture_pfn_start + aperture_page_count));
 }
 
-static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
+static void __init exclude_from_core(u64 aper_base, u32 aper_order)
 {
        aperture_pfn_start = aper_base >> PAGE_SHIFT;
        aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT;
-       WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram));
+#ifdef CONFIG_PROC_VMCORE
+       WARN_ON(register_oldmem_pfn_is_ram(&gart_mem_pfn_is_ram));
+#endif
+#ifdef CONFIG_PROC_KCORE
+       WARN_ON(register_mem_pfn_is_ram(&gart_mem_pfn_is_ram));
+#endif
 }
 #else
-static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
+static void exclude_from_core(u64 aper_base, u32 aper_order)
 {
 }
 #endif
@@ -474,7 +480,7 @@ out:
                         * may have allocated the range over its e820 RAM
                         * and fixed up the northbridge
                         */
-                       exclude_from_vmcore(last_aper_base, last_aper_order);
+                       exclude_from_core(last_aper_base, last_aper_order);
 
                        return 1;
                }
@@ -520,7 +526,7 @@ out:
         * overlap with the first kernel's memory. We can't access the
         * range through vmcore even though it should be part of the dump.
         */
-       exclude_from_vmcore(aper_alloc, aper_order);
+       exclude_from_core(aper_alloc, aper_order);
 
        /* Fix up the north bridges */
        for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
index 2da82eff0eb4f8498c8cdd65bd9f9dd5fa1fa6eb..29630393f300733a7a750f3a4eec24091a3e958a 100644 (file)
@@ -275,7 +275,7 @@ static const struct {
        const char                      *option;
        enum spectre_v2_user_cmd        cmd;
        bool                            secure;
-} v2_user_options[] __initdata = {
+} v2_user_options[] __initconst = {
        { "auto",               SPECTRE_V2_USER_CMD_AUTO,               false },
        { "off",                SPECTRE_V2_USER_CMD_NONE,               false },
        { "on",                 SPECTRE_V2_USER_CMD_FORCE,              true  },
@@ -419,7 +419,7 @@ static const struct {
        const char *option;
        enum spectre_v2_mitigation_cmd cmd;
        bool secure;
-} mitigation_options[] __initdata = {
+} mitigation_options[] __initconst = {
        { "off",                SPECTRE_V2_CMD_NONE,              false },
        { "on",                 SPECTRE_V2_CMD_FORCE,             true  },
        { "retpoline",          SPECTRE_V2_CMD_RETPOLINE,         false },
@@ -440,7 +440,8 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
        char arg[20];
        int ret, i;
 
-       if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
+       if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
+           cpu_mitigations_off())
                return SPECTRE_V2_CMD_NONE;
 
        ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
@@ -658,7 +659,7 @@ static const char * const ssb_strings[] = {
 static const struct {
        const char *option;
        enum ssb_mitigation_cmd cmd;
-} ssb_mitigation_options[]  __initdata = {
+} ssb_mitigation_options[]  __initconst = {
        { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
        { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
        { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
@@ -672,7 +673,8 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
        char arg[20];
        int ret, i;
 
-       if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
+       if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
+           cpu_mitigations_off()) {
                return SPEC_STORE_BYPASS_CMD_NONE;
        } else {
                ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
@@ -1008,6 +1010,11 @@ static void __init l1tf_select_mitigation(void)
        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return;
 
+       if (cpu_mitigations_off())
+               l1tf_mitigation = L1TF_MITIGATION_OFF;
+       else if (cpu_mitigations_auto_nosmt())
+               l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
+
        override_cache_bits(&boot_cpu_data);
 
        switch (l1tf_mitigation) {
index d12226f60168e1d844be6dded3c666509326e7a2..1d9b8aaea06c8c9c7d14b0c30d51ded3bac83d7d 100644 (file)
@@ -124,7 +124,7 @@ static void set_cx86_reorder(void)
        setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
 
        /* Load/Store Serialize to mem access disable (=reorder it) */
-       setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80);
+       setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
        /* set load/store serialize from 1GB to 4GB */
        ccr3 |= 0xe0;
        setCx86(CX86_CCR3, ccr3);
@@ -135,11 +135,11 @@ static void set_cx86_memwb(void)
        pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
 
        /* CCR2 bit 2: unlock NW bit */
-       setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
+       setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
        /* set 'Not Write-through' */
        write_cr0(read_cr0() | X86_CR0_NW);
        /* CCR2 bit 2: lock NW bit and set WT1 */
-       setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14);
+       setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
 }
 
 /*
@@ -153,14 +153,14 @@ static void geode_configure(void)
        local_irq_save(flags);
 
        /* Suspend on halt power saving and enable #SUSP pin */
-       setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88);
+       setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
 
        ccr3 = getCx86(CX86_CCR3);
        setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);       /* enable MAPEN */
 
 
        /* FPU fast, DTE cache, Mem bypass */
-       setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38);
+       setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
        setCx86(CX86_CCR3, ccr3);                       /* disable MAPEN */
 
        set_cx86_memwb();
@@ -296,7 +296,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
                /* GXm supports extended cpuid levels 'ala' AMD */
                if (c->cpuid_level == 2) {
                        /* Enable cxMMX extensions (GX1 Datasheet 54) */
-                       setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1);
+                       setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
 
                        /*
                         * GXm : 0x30 ... 0x5f GXm  datasheet 51
@@ -319,7 +319,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
                if (dir1 > 7) {
                        dir0_msn++;  /* M II */
                        /* Enable MMX extensions (App note 108) */
-                       setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
+                       setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
                } else {
                        /* A 6x86MX - it has the bug. */
                        set_cpu_bug(c, X86_BUG_COMA);
index fc3c07fe7df58a22c01c8c1180d0b394bde8b59a..3142fd7a9b32201fe34f9933232127c89c09c017 100644 (file)
@@ -611,8 +611,8 @@ static void init_intel_energy_perf(struct cpuinfo_x86 *c)
        if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE)
                return;
 
-       pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
-       pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
+       pr_info_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
+       pr_info_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
        epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
        wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
 }
index 97f9ada9cedaf4e7cde47e819f7b4a0d6ac39d77..5260185cbf7ba1a77ecc30bdd61a99a2338b159b 100644 (file)
@@ -608,6 +608,8 @@ static int microcode_reload_late(void)
        if (ret > 0)
                microcode_check();
 
+       pr_info("Reload completed, microcode revision: 0x%x\n", boot_cpu_data.microcode);
+
        return ret;
 }
 
index f33f11f69078e7f4497e48ae771466b74a4405a2..1573a0a6b52530f1759429bbee15476e29e31126 100644 (file)
@@ -501,11 +501,8 @@ out_unlock:
 void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
 {
        unsigned long delay = msecs_to_jiffies(delay_ms);
-       struct rdt_resource *r;
        int cpu;
 
-       r = &rdt_resources_all[RDT_RESOURCE_L3];
-
        cpu = cpumask_any(&dom->cpu_mask);
        dom->cqm_work_cpu = cpu;
 
index 399601eda8e43c2cf8a855b44b4dc811a247c9e5..85212a32b54df8be06365eeaf4ef399996a3abff 100644 (file)
@@ -2039,14 +2039,14 @@ out:
 enum rdt_param {
        Opt_cdp,
        Opt_cdpl2,
-       Opt_mba_mpbs,
+       Opt_mba_mbps,
        nr__rdt_params
 };
 
 static const struct fs_parameter_spec rdt_param_specs[] = {
        fsparam_flag("cdp",             Opt_cdp),
        fsparam_flag("cdpl2",           Opt_cdpl2),
-       fsparam_flag("mba_mpbs",        Opt_mba_mpbs),
+       fsparam_flag("mba_MBps",        Opt_mba_mbps),
        {}
 };
 
@@ -2072,7 +2072,7 @@ static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
        case Opt_cdpl2:
                ctx->enable_cdpl2 = true;
                return 0;
-       case Opt_mba_mpbs:
+       case Opt_mba_mbps:
                if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                        return -EINVAL;
                ctx->enable_mba_mbps = true;
@@ -2610,9 +2610,10 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
                        rdt_last_cmd_puts("Failed to initialize allocations\n");
                        return ret;
                }
-               rdtgrp->mode = RDT_MODE_SHAREABLE;
        }
 
+       rdtgrp->mode = RDT_MODE_SHAREABLE;
+
        return 0;
 }
 
index dfd3aca82c61cbe345f462a62ea6b52fa0b81516..fb32925a2e62bc462c22429c4a5a5c73fca3c028 100644 (file)
@@ -905,6 +905,8 @@ int __init hpet_enable(void)
                return 0;
 
        hpet_set_mapping();
+       if (!hpet_virt_address)
+               return 0;
 
        /*
         * Read the period and check for a sane value:
index ff9bfd40429efeb7b4868d370628356e28265ec1..d7308302100276539e5c78798ee7ff013d719aa8 100644 (file)
@@ -354,6 +354,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
 #endif
        default:
                WARN_ON_ONCE(1);
+               return -EINVAL;
        }
 
        /*
index a034cb808e7eb482e6fd8eae3fac9afca63b429c..fed46ddb1eef2d3de307f1cbb899f45c4b3e67c2 100644 (file)
@@ -569,6 +569,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
        unsigned long *sara = stack_addr(regs);
 
        ri->ret_addr = (kprobe_opcode_t *) *sara;
+       ri->fp = sara;
 
        /* Replace the return addr with trampoline addr */
        *sara = (unsigned long) &kretprobe_trampoline;
@@ -748,26 +749,48 @@ asm(
 NOKPROBE_SYMBOL(kretprobe_trampoline);
 STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
 
+static struct kprobe kretprobe_kprobe = {
+       .addr = (void *)kretprobe_trampoline,
+};
+
 /*
  * Called from kretprobe_trampoline
  */
 static __used void *trampoline_handler(struct pt_regs *regs)
 {
+       struct kprobe_ctlblk *kcb;
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
        kprobe_opcode_t *correct_ret_addr = NULL;
+       void *frame_pointer;
+       bool skipped = false;
+
+       preempt_disable();
+
+       /*
+        * Set a dummy kprobe to avoid kretprobe recursion.
+        * Since kretprobes never run inside a kprobe handler, no kprobe
+        * can be running at this point.
+        */
+       kcb = get_kprobe_ctlblk();
+       __this_cpu_write(current_kprobe, &kretprobe_kprobe);
+       kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 
        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);
        /* fixup registers */
 #ifdef CONFIG_X86_64
        regs->cs = __KERNEL_CS;
+       /* On x86-64, we use pt_regs->sp as the return address holder. */
+       frame_pointer = &regs->sp;
 #else
        regs->cs = __KERNEL_CS | get_kernel_rpl();
        regs->gs = 0;
+       /* On x86-32, we use pt_regs->flags as the return address holder. */
+       frame_pointer = &regs->flags;
 #endif
        regs->ip = trampoline_address;
        regs->orig_ax = ~0UL;
@@ -789,8 +812,25 @@ static __used void *trampoline_handler(struct pt_regs *regs)
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;
+               /*
+                * Return probes must be pushed on this hash list in the
+                * correct order (same as the return order) so that they
+                * can be popped correctly. If we find an entry pushed in
+                * the wrong order, we have found a function that must not
+                * be probed, because the out-of-order entry was pushed
+                * while another kretprobe was itself being processed.
+                */
+               if (ri->fp != frame_pointer) {
+                       if (!skipped)
+                               pr_warn("kretprobe is stacked incorrectly. Trying to fix it up.\n");
+                       skipped = true;
+                       continue;
+               }
 
                orig_ret_address = (unsigned long)ri->ret_addr;
+               if (skipped)
+                       pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
+                               ri->rp->kp.addr);
 
                if (orig_ret_address != trampoline_address)
                        /*
@@ -808,14 +848,15 @@ static __used void *trampoline_handler(struct pt_regs *regs)
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;
+               if (ri->fp != frame_pointer)
+                       continue;
 
                orig_ret_address = (unsigned long)ri->ret_addr;
                if (ri->rp && ri->rp->handler) {
                        __this_cpu_write(current_kprobe, &ri->rp->kp);
-                       get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
                        ri->ret_addr = correct_ret_addr;
                        ri->rp->handler(ri, regs);
-                       __this_cpu_write(current_kprobe, NULL);
+                       __this_cpu_write(current_kprobe, &kretprobe_kprobe);
                }
 
                recycle_rp_inst(ri, &empty_rp);
@@ -831,6 +872,9 @@ static __used void *trampoline_handler(struct pt_regs *regs)
 
        kretprobe_hash_unlock(current, &flags);
 
+       __this_cpu_write(current_kprobe, NULL);
+       preempt_enable();
+
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
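
The hunk above tags every kretprobe instance with the stack slot that held its return address (ri->fp) and lets the trampoline skip entries recorded in the wrong order. A standalone sketch of that LIFO consistency check, using simplified stand-in types rather than the kernel's hash-bucket structures:

/*
 * Standalone sketch (not kernel code): each recorded instance remembers
 * the stack slot that held its return address, and the pop path skips
 * any entry whose slot does not match the frame being unwound.
 */
#include <stdbool.h>
#include <stdio.h>

struct ri { unsigned long ret_addr; void *fp; };

static bool pop_matching(struct ri *stack, int *top, void *frame_pointer,
			 unsigned long *ret_addr)
{
	while (*top > 0) {
		struct ri *e = &stack[--(*top)];

		if (e->fp != frame_pointer) {
			/* wrong order: from a function that must not be probed */
			fprintf(stderr, "skipping stale entry %#lx\n", e->ret_addr);
			continue;
		}
		*ret_addr = e->ret_addr;
		return true;
	}
	return false;	/* no instance belongs to this frame */
}
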
index 3482460d984d0395830c6227a39e639a5aa87b07..1bfe5c6e6cfe1a1e414b20dc4c92bce29ef9fbf7 100644 (file)
@@ -598,8 +598,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
                        mpf_base = base;
                        mpf_found = true;
 
-                       pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n",
-                               base, base + sizeof(*mpf) - 1, mpf);
+                       pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
+                               base, base + sizeof(*mpf) - 1);
 
                        memblock_reserve(base, sizeof(*mpf));
                        if (mpf->physptr)
index c06c4c16c6b69c0d251505fa4c03a658c5f938a6..07c30ee1742542f15923b6e4ab7020b22bc634ad 100644 (file)
@@ -59,18 +59,34 @@ static unsigned int pt_regs_offset[PERF_REG_X86_MAX] = {
 
 u64 perf_reg_value(struct pt_regs *regs, int idx)
 {
+       struct x86_perf_regs *perf_regs;
+
+       if (idx >= PERF_REG_X86_XMM0 && idx < PERF_REG_X86_XMM_MAX) {
+               perf_regs = container_of(regs, struct x86_perf_regs, regs);
+               if (!perf_regs->xmm_regs)
+                       return 0;
+               return perf_regs->xmm_regs[idx - PERF_REG_X86_XMM0];
+       }
+
        if (WARN_ON_ONCE(idx >= ARRAY_SIZE(pt_regs_offset)))
                return 0;
 
        return regs_get_register(regs, pt_regs_offset[idx]);
 }
 
-#define REG_RESERVED (~((1ULL << PERF_REG_X86_MAX) - 1ULL))
-
 #ifdef CONFIG_X86_32
+#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_R8) | \
+                      (1ULL << PERF_REG_X86_R9) | \
+                      (1ULL << PERF_REG_X86_R10) | \
+                      (1ULL << PERF_REG_X86_R11) | \
+                      (1ULL << PERF_REG_X86_R12) | \
+                      (1ULL << PERF_REG_X86_R13) | \
+                      (1ULL << PERF_REG_X86_R14) | \
+                      (1ULL << PERF_REG_X86_R15))
+
 int perf_reg_validate(u64 mask)
 {
-       if (!mask || mask & REG_RESERVED)
+       if (!mask || (mask & REG_NOSUPPORT))
                return -EINVAL;
 
        return 0;
@@ -96,10 +112,7 @@ void perf_get_regs_user(struct perf_regs *regs_user,
 
 int perf_reg_validate(u64 mask)
 {
-       if (!mask || mask & REG_RESERVED)
-               return -EINVAL;
-
-       if (mask & REG_NOSUPPORT)
+       if (!mask || (mask & REG_NOSUPPORT))
                return -EINVAL;
 
        return 0;
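
The new XMM path above recovers the enclosing x86_perf_regs from the embedded pt_regs pointer via container_of() and treats a NULL xmm_regs array as "not sampled". A minimal compilable sketch of that pattern, with stub types in place of the kernel definitions:

/*
 * Compilable sketch with stub types (not the kernel definitions): the
 * wrapping structure is recovered from a pointer to its embedded member.
 */
#include <stddef.h>
#include <stdint.h>

struct pt_regs_stub { uint64_t ip, sp; };

struct x86_perf_regs_stub {
	struct pt_regs_stub regs;
	uint64_t *xmm_regs;	/* NULL when XMM state was not captured */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static uint64_t xmm_value(struct pt_regs_stub *regs, int xmm_idx)
{
	struct x86_perf_regs_stub *perf_regs =
		container_of(regs, struct x86_perf_regs_stub, regs);

	if (!perf_regs->xmm_regs)	/* XMM registers not sampled */
		return 0;
	return perf_regs->xmm_regs[xmm_idx];
}
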
index 58ac7be52c7a6df944dca7305492b8ce70ed8d8e..957eae13b37008339b6dfaad350ea9468fccb760 100644 (file)
@@ -426,6 +426,8 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
        u64 msr = x86_spec_ctrl_base;
        bool updmsr = false;
 
+       lockdep_assert_irqs_disabled();
+
        /*
         * If TIF_SSBD is different, select the proper mitigation
         * method. Note that if SSBD mitigation is disabled or permanently
@@ -477,10 +479,12 @@ static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
 
 void speculation_ctrl_update(unsigned long tif)
 {
+       unsigned long flags;
+
        /* Forced update. Make sure all relevant TIF flags are different */
-       preempt_disable();
+       local_irq_save(flags);
        __speculation_ctrl_update(~tif, tif);
-       preempt_enable();
+       local_irq_restore(flags);
 }
 
 /* Called from seccomp/prctl update */
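
speculation_ctrl_update() now disables interrupts outright, matching the lockdep_assert_irqs_disabled() added to __speculation_ctrl_update(): keeping preemption off alone would still let an interrupt handler race the MSR update. A rough userspace analogue only, with signal blocking standing in for local_irq_save()/local_irq_restore():

/*
 * Userspace analogue (not kernel code): blocking signals stands in for
 * disabling interrupts; merely preventing a context switch would not
 * keep an asynchronous handler from racing the read-modify-write.
 */
#include <signal.h>

static volatile unsigned long spec_ctrl_shadow;	/* stand-in for MSR state */

static void speculation_ctrl_update_sketch(unsigned long tif)
{
	sigset_t all, old;

	sigfillset(&all);
	sigprocmask(SIG_BLOCK, &all, &old);	/* "local_irq_save()"    */
	spec_ctrl_shadow = tif;			/* protected update      */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* "local_irq_restore()" */
}
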
index e471d8e6f0b248951a44654f5222ef217fd4dd2e..70933193878caafa4a6414dd7313aa3b7a840d84 100644 (file)
@@ -127,6 +127,13 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
        struct task_struct *tsk;
        int err;
 
+       /*
+        * For a new task use the RESET flags value since there is no prior state.
+        * All the status flags are zero; DF and all the system flags must also
+        * be 0, specifically IF must be 0 because we context switch to the new
+        * task with interrupts disabled.
+        */
+       frame->flags = X86_EFLAGS_FIXED;
        frame->bp = 0;
        frame->ret_addr = (unsigned long) ret_from_fork;
        p->thread.sp = (unsigned long) fork_frame;
index 6a62f4af9fcf72d8979f7a842d2932fa34338afc..844a28b29967ded4b78e3e69ffda3a6d6a2097d1 100644 (file)
@@ -392,6 +392,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
        childregs = task_pt_regs(p);
        fork_frame = container_of(childregs, struct fork_frame, regs);
        frame = &fork_frame->frame;
+
        frame->bp = 0;
        frame->ret_addr = (unsigned long) ret_from_fork;
        p->thread.sp = (unsigned long) fork_frame;
index 725624b6c0c05cdc0c94175214a7ce796df47eee..8fd3cedd9accdd1c17757e5a381b2ab1eac1c032 100644 (file)
@@ -81,6 +81,19 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
        return 0;
 }
 
+/*
+ * Some machines don't handle the default ACPI reboot method and
+ * require the EFI reboot method:
+ */
+static int __init set_efi_reboot(const struct dmi_system_id *d)
+{
+       if (reboot_type != BOOT_EFI && !efi_runtime_disabled()) {
+               reboot_type = BOOT_EFI;
+               pr_info("%s series board detected. Selecting EFI reboot method.\n", d->ident);
+       }
+       return 0;
+}
+
 void __noreturn machine_real_restart(unsigned int type)
 {
        local_irq_disable();
@@ -166,6 +179,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
                },
        },
+       {       /* Handle reboot issue on Acer TravelMate X514-51T */
+               .callback = set_efi_reboot,
+               .ident = "Acer TravelMate X514-51T",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate X514-51T"),
+               },
+       },
 
        /* Apple */
        {       /* Handle problems with rebooting on Apple MacBook5 */
index 3d872a527cd966facecec3bd8440677ae1d99204..3773905cd2c1d2d3f4365224137324ad7eafa4c7 100644 (file)
@@ -1005,13 +1005,11 @@ void __init setup_arch(char **cmdline_p)
        if (efi_enabled(EFI_BOOT))
                efi_init();
 
-       dmi_scan_machine();
-       dmi_memdev_walk();
-       dmi_set_dump_stack_arch_desc();
+       dmi_setup();
 
        /*
         * VMware detection requires dmi to be available, so this
-        * needs to be done after dmi_scan_machine(), for the boot CPU.
+        * needs to be done after dmi_setup(), for the boot CPU.
         */
        init_hypervisor_platform();
 
index 08dfd4c1a4f95a19c78c855028e95b6be01ba02b..dff90fb6a9af61fae4d842db282a79d3214eb181 100644 (file)
@@ -132,16 +132,6 @@ static int restore_sigcontext(struct pt_regs *regs,
                COPY_SEG_CPL3(cs);
                COPY_SEG_CPL3(ss);
 
-#ifdef CONFIG_X86_64
-               /*
-                * Fix up SS if needed for the benefit of old DOSEMU and
-                * CRIU.
-                */
-               if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) &&
-                            user_64bit_mode(regs)))
-                       force_valid_ss(regs);
-#endif
-
                get_user_ex(tmpflags, &sc->flags);
                regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
                regs->orig_ax = -1;             /* disable syscall checks */
@@ -150,6 +140,15 @@ static int restore_sigcontext(struct pt_regs *regs,
                buf = (void __user *)buf_val;
        } get_user_catch(err);
 
+#ifdef CONFIG_X86_64
+       /*
+        * Fix up SS if needed for the benefit of old DOSEMU and
+        * CRIU.
+        */
+       if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
+               force_valid_ss(regs);
+#endif
+
        err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));
 
        force_iret();
@@ -461,6 +460,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 {
        struct rt_sigframe __user *frame;
        void __user *fp = NULL;
+       unsigned long uc_flags;
        int err = 0;
 
        frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);
@@ -473,9 +473,11 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
                        return -EFAULT;
        }
 
+       uc_flags = frame_uc_flags(regs);
+
        put_user_try {
                /* Create the ucontext.  */
-               put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
+               put_user_ex(uc_flags, &frame->uc.uc_flags);
                put_user_ex(0, &frame->uc.uc_link);
                save_altstack_ex(&frame->uc.uc_stack, regs->sp);
 
@@ -541,6 +543,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
 {
 #ifdef CONFIG_X86_X32_ABI
        struct rt_sigframe_x32 __user *frame;
+       unsigned long uc_flags;
        void __user *restorer;
        int err = 0;
        void __user *fpstate = NULL;
@@ -555,9 +558,11 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
                        return -EFAULT;
        }
 
+       uc_flags = frame_uc_flags(regs);
+
        put_user_try {
                /* Create the ucontext.  */
-               put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
+               put_user_ex(uc_flags, &frame->uc.uc_flags);
                put_user_ex(0, &frame->uc.uc_link);
                compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
                put_user_ex(0, &frame->uc.uc__pad0);
@@ -688,10 +693,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
        sigset_t *set = sigmask_to_save();
        compat_sigset_t *cset = (compat_sigset_t *) set;
 
-       /*
-        * Increment event counter and perform fixup for the pre-signal
-        * frame.
-        */
+       /* Perform fixup for the pre-signal frame. */
        rseq_signal_deliver(ksig, regs);
 
        /* Set up the stack frame */
index 5c2d71a1dc069fd2b7ea2457fdd83e85c5e3383a..2abf27d7df6b8b8b46b972a52619993075f0812b 100644 (file)
 #include <asm/stacktrace.h>
 #include <asm/unwind.h>
 
-static int save_stack_address(struct stack_trace *trace, unsigned long addr,
-                             bool nosched)
-{
-       if (nosched && in_sched_functions(addr))
-               return 0;
-
-       if (trace->skip > 0) {
-               trace->skip--;
-               return 0;
-       }
-
-       if (trace->nr_entries >= trace->max_entries)
-               return -1;
-
-       trace->entries[trace->nr_entries++] = addr;
-       return 0;
-}
-
-static void noinline __save_stack_trace(struct stack_trace *trace,
-                              struct task_struct *task, struct pt_regs *regs,
-                              bool nosched)
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+                    struct task_struct *task, struct pt_regs *regs)
 {
        struct unwind_state state;
        unsigned long addr;
 
-       if (regs)
-               save_stack_address(trace, regs->ip, nosched);
+       if (regs && !consume_entry(cookie, regs->ip, false))
+               return;
 
        for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
             unwind_next_frame(&state)) {
                addr = unwind_get_return_address(&state);
-               if (!addr || save_stack_address(trace, addr, nosched))
+               if (!addr || !consume_entry(cookie, addr, false))
                        break;
        }
-
-       if (trace->nr_entries < trace->max_entries)
-               trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 
 /*
- * Save stack-backtrace addresses into a stack_trace buffer.
+ * This function returns an error if it detects any unreliable features of the
+ * stack.  Otherwise it guarantees that the stack trace is reliable.
+ *
+ * If the task is not 'current', the caller *must* ensure the task is inactive.
  */
-void save_stack_trace(struct stack_trace *trace)
-{
-       trace->skip++;
-       __save_stack_trace(trace, current, NULL, false);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace);
-
-void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
-{
-       __save_stack_trace(trace, current, regs, false);
-}
-
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
-{
-       if (!try_get_task_stack(tsk))
-               return;
-
-       if (tsk == current)
-               trace->skip++;
-       __save_stack_trace(trace, tsk, NULL, true);
-
-       put_task_stack(tsk);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
-
-#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
-
-static int __always_inline
-__save_stack_trace_reliable(struct stack_trace *trace,
-                           struct task_struct *task)
+int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+                            void *cookie, struct task_struct *task)
 {
        struct unwind_state state;
        struct pt_regs *regs;
@@ -97,7 +50,7 @@ __save_stack_trace_reliable(struct stack_trace *trace,
                if (regs) {
                        /* Success path for user tasks */
                        if (user_mode(regs))
-                               goto success;
+                               return 0;
 
                        /*
                         * Kernel mode registers on the stack indicate an
@@ -120,7 +73,7 @@ __save_stack_trace_reliable(struct stack_trace *trace,
                if (!addr)
                        return -EINVAL;
 
-               if (save_stack_address(trace, addr, false))
+               if (!consume_entry(cookie, addr, false))
                        return -EINVAL;
        }
 
@@ -132,39 +85,9 @@ __save_stack_trace_reliable(struct stack_trace *trace,
        if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
                return -EINVAL;
 
-success:
-       if (trace->nr_entries < trace->max_entries)
-               trace->entries[trace->nr_entries++] = ULONG_MAX;
-
        return 0;
 }
 
-/*
- * This function returns an error if it detects any unreliable features of the
- * stack.  Otherwise it guarantees that the stack trace is reliable.
- *
- * If the task is not 'current', the caller *must* ensure the task is inactive.
- */
-int save_stack_trace_tsk_reliable(struct task_struct *tsk,
-                                 struct stack_trace *trace)
-{
-       int ret;
-
-       /*
-        * If the task doesn't have a stack (e.g., a zombie), the stack is
-        * "reliably" empty.
-        */
-       if (!try_get_task_stack(tsk))
-               return 0;
-
-       ret = __save_stack_trace_reliable(trace, tsk);
-
-       put_task_stack(tsk);
-
-       return ret;
-}
-#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
-
 /* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
 
 struct stack_frame_user {
@@ -189,15 +112,15 @@ copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
        return ret;
 }
 
-static inline void __save_stack_trace_user(struct stack_trace *trace)
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+                         const struct pt_regs *regs)
 {
-       const struct pt_regs *regs = task_pt_regs(current);
        const void __user *fp = (const void __user *)regs->bp;
 
-       if (trace->nr_entries < trace->max_entries)
-               trace->entries[trace->nr_entries++] = regs->ip;
+       if (!consume_entry(cookie, regs->ip, false))
+               return;
 
-       while (trace->nr_entries < trace->max_entries) {
+       while (1) {
                struct stack_frame_user frame;
 
                frame.next_fp = NULL;
@@ -207,8 +130,8 @@ static inline void __save_stack_trace_user(struct stack_trace *trace)
                if ((unsigned long)fp < regs->sp)
                        break;
                if (frame.ret_addr) {
-                       trace->entries[trace->nr_entries++] =
-                               frame.ret_addr;
+                       if (!consume_entry(cookie, frame.ret_addr, false))
+                               return;
                }
                if (fp == frame.next_fp)
                        break;
@@ -216,14 +139,3 @@ static inline void __save_stack_trace_user(struct stack_trace *trace)
        }
 }
 
-void save_stack_trace_user(struct stack_trace *trace)
-{
-       /*
-        * Trace user stack if we are not a kernel thread
-        */
-       if (current->mm) {
-               __save_stack_trace_user(trace);
-       }
-       if (trace->nr_entries < trace->max_entries)
-               trace->entries[trace->nr_entries++] = ULONG_MAX;
-}
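
The conversion above replaces the save_stack_trace*() family with arch_stack_walk*() callbacks: the architecture loop just produces addresses and stops as soon as the consumer declines one, so buffer limits and skip counts move into the generic layer. A standalone sketch of that callback shape, with stand-in types (the kernel's stack_trace_consume_fn also takes a "reliable" flag, mirrored here):

/*
 * Standalone sketch with stand-in types: the walker produces addresses,
 * the consumer decides when to stop (buffer full, skip count, etc.).
 */
#include <stdbool.h>
#include <stddef.h>

typedef bool (*consume_fn)(void *cookie, unsigned long addr, bool reliable);

struct trace_buf {
	unsigned long *entries;
	size_t len, max;
};

static bool store_entry(void *cookie, unsigned long addr, bool reliable)
{
	struct trace_buf *b = cookie;

	(void)reliable;
	if (b->len >= b->max)
		return false;		/* buffer full: stop the walk */
	b->entries[b->len++] = addr;
	return true;
}

static void walk_sketch(consume_fn consume, void *cookie,
			const unsigned long *frames, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (!frames[i] || !consume(cookie, frames[i], false))
			break;
}
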
index bad8c51fee6eea6be91d7a594e820470c121c2a9..a5127b2c195f9df3031e1df660764bc1624078f2 100644 (file)
@@ -362,7 +362,7 @@ SECTIONS
        .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
                __bss_start = .;
                *(.bss..page_aligned)
-               *(.bss)
+               *(BSS_MAIN)
                BSS_DECRYPTED
                . = ALIGN(PAGE_SIZE);
                __bss_stop = .;
index c338984c850d28a1213e46f86efc06d425115660..d0d5dd44b4f478524cc959cefb245695d9e40894 100644 (file)
@@ -2331,24 +2331,18 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt)
 
 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
 {
+#ifdef CONFIG_X86_64
        u32 eax, ebx, ecx, edx;
 
        eax = 0x80000001;
        ecx = 0;
        ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
        return edx & bit(X86_FEATURE_LM);
+#else
+       return false;
+#endif
 }
 
-#define GET_SMSTATE(type, smbase, offset)                                \
-       ({                                                                \
-        type __val;                                                      \
-        int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val,      \
-                                     sizeof(__val));                     \
-        if (r != X86EMUL_CONTINUE)                                       \
-                return X86EMUL_UNHANDLEABLE;                             \
-        __val;                                                           \
-       })
-
 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
 {
        desc->g    = (flags >> 23) & 1;
@@ -2361,27 +2355,30 @@ static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
        desc->type = (flags >>  8) & 15;
 }
 
-static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
+static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
+                          int n)
 {
        struct desc_struct desc;
        int offset;
        u16 selector;
 
-       selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
+       selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
 
        if (n < 3)
                offset = 0x7f84 + n * 12;
        else
                offset = 0x7f2c + (n - 3) * 12;
 
-       set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
-       set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
-       rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
+       set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
+       set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
+       rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
        ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
        return X86EMUL_CONTINUE;
 }
 
-static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
+#ifdef CONFIG_X86_64
+static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
+                          int n)
 {
        struct desc_struct desc;
        int offset;
@@ -2390,15 +2387,16 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 
        offset = 0x7e00 + n * 16;
 
-       selector =                GET_SMSTATE(u16, smbase, offset);
-       rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
-       set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
-       set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
-       base3 =                   GET_SMSTATE(u32, smbase, offset + 12);
+       selector =                GET_SMSTATE(u16, smstate, offset);
+       rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
+       set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
+       set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
+       base3 =                   GET_SMSTATE(u32, smstate, offset + 12);
 
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
        return X86EMUL_CONTINUE;
 }
+#endif
 
 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
                                    u64 cr0, u64 cr3, u64 cr4)
@@ -2445,7 +2443,8 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
        return X86EMUL_CONTINUE;
 }
 
-static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
+static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
+                            const char *smstate)
 {
        struct desc_struct desc;
        struct desc_ptr dt;
@@ -2453,53 +2452,55 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
        u32 val, cr0, cr3, cr4;
        int i;
 
-       cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
-       cr3 =                      GET_SMSTATE(u32, smbase, 0x7ff8);
-       ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
-       ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);
+       cr0 =                      GET_SMSTATE(u32, smstate, 0x7ffc);
+       cr3 =                      GET_SMSTATE(u32, smstate, 0x7ff8);
+       ctxt->eflags =             GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
+       ctxt->_eip =               GET_SMSTATE(u32, smstate, 0x7ff0);
 
        for (i = 0; i < 8; i++)
-               *reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
+               *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
 
-       val = GET_SMSTATE(u32, smbase, 0x7fcc);
+       val = GET_SMSTATE(u32, smstate, 0x7fcc);
        ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
-       val = GET_SMSTATE(u32, smbase, 0x7fc8);
+       val = GET_SMSTATE(u32, smstate, 0x7fc8);
        ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
 
-       selector =                 GET_SMSTATE(u32, smbase, 0x7fc4);
-       set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f64));
-       set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f60));
-       rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f5c));
+       selector =                 GET_SMSTATE(u32, smstate, 0x7fc4);
+       set_desc_base(&desc,       GET_SMSTATE(u32, smstate, 0x7f64));
+       set_desc_limit(&desc,      GET_SMSTATE(u32, smstate, 0x7f60));
+       rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smstate, 0x7f5c));
        ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
 
-       selector =                 GET_SMSTATE(u32, smbase, 0x7fc0);
-       set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f80));
-       set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f7c));
-       rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f78));
+       selector =                 GET_SMSTATE(u32, smstate, 0x7fc0);
+       set_desc_base(&desc,       GET_SMSTATE(u32, smstate, 0x7f80));
+       set_desc_limit(&desc,      GET_SMSTATE(u32, smstate, 0x7f7c));
+       rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smstate, 0x7f78));
        ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
 
-       dt.address =               GET_SMSTATE(u32, smbase, 0x7f74);
-       dt.size =                  GET_SMSTATE(u32, smbase, 0x7f70);
+       dt.address =               GET_SMSTATE(u32, smstate, 0x7f74);
+       dt.size =                  GET_SMSTATE(u32, smstate, 0x7f70);
        ctxt->ops->set_gdt(ctxt, &dt);
 
-       dt.address =               GET_SMSTATE(u32, smbase, 0x7f58);
-       dt.size =                  GET_SMSTATE(u32, smbase, 0x7f54);
+       dt.address =               GET_SMSTATE(u32, smstate, 0x7f58);
+       dt.size =                  GET_SMSTATE(u32, smstate, 0x7f54);
        ctxt->ops->set_idt(ctxt, &dt);
 
        for (i = 0; i < 6; i++) {
-               int r = rsm_load_seg_32(ctxt, smbase, i);
+               int r = rsm_load_seg_32(ctxt, smstate, i);
                if (r != X86EMUL_CONTINUE)
                        return r;
        }
 
-       cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
+       cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
 
-       ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
+       ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
 
        return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 }
 
-static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
+#ifdef CONFIG_X86_64
+static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
+                            const char *smstate)
 {
        struct desc_struct desc;
        struct desc_ptr dt;
@@ -2509,43 +2510,43 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
        int i, r;
 
        for (i = 0; i < 16; i++)
-               *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
+               *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
 
-       ctxt->_eip   = GET_SMSTATE(u64, smbase, 0x7f78);
-       ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
+       ctxt->_eip   = GET_SMSTATE(u64, smstate, 0x7f78);
+       ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
 
-       val = GET_SMSTATE(u32, smbase, 0x7f68);
+       val = GET_SMSTATE(u32, smstate, 0x7f68);
        ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
-       val = GET_SMSTATE(u32, smbase, 0x7f60);
+       val = GET_SMSTATE(u32, smstate, 0x7f60);
        ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
 
-       cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
-       cr3 =                       GET_SMSTATE(u64, smbase, 0x7f50);
-       cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
-       ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
-       val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
+       cr0 =                       GET_SMSTATE(u64, smstate, 0x7f58);
+       cr3 =                       GET_SMSTATE(u64, smstate, 0x7f50);
+       cr4 =                       GET_SMSTATE(u64, smstate, 0x7f48);
+       ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
+       val =                       GET_SMSTATE(u64, smstate, 0x7ed0);
        ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
 
-       selector =                  GET_SMSTATE(u32, smbase, 0x7e90);
-       rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e92) << 8);
-       set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e94));
-       set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e98));
-       base3 =                     GET_SMSTATE(u32, smbase, 0x7e9c);
+       selector =                  GET_SMSTATE(u32, smstate, 0x7e90);
+       rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smstate, 0x7e92) << 8);
+       set_desc_limit(&desc,       GET_SMSTATE(u32, smstate, 0x7e94));
+       set_desc_base(&desc,        GET_SMSTATE(u32, smstate, 0x7e98));
+       base3 =                     GET_SMSTATE(u32, smstate, 0x7e9c);
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
 
-       dt.size =                   GET_SMSTATE(u32, smbase, 0x7e84);
-       dt.address =                GET_SMSTATE(u64, smbase, 0x7e88);
+       dt.size =                   GET_SMSTATE(u32, smstate, 0x7e84);
+       dt.address =                GET_SMSTATE(u64, smstate, 0x7e88);
        ctxt->ops->set_idt(ctxt, &dt);
 
-       selector =                  GET_SMSTATE(u32, smbase, 0x7e70);
-       rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e72) << 8);
-       set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e74));
-       set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e78));
-       base3 =                     GET_SMSTATE(u32, smbase, 0x7e7c);
+       selector =                  GET_SMSTATE(u32, smstate, 0x7e70);
+       rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smstate, 0x7e72) << 8);
+       set_desc_limit(&desc,       GET_SMSTATE(u32, smstate, 0x7e74));
+       set_desc_base(&desc,        GET_SMSTATE(u32, smstate, 0x7e78));
+       base3 =                     GET_SMSTATE(u32, smstate, 0x7e7c);
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
 
-       dt.size =                   GET_SMSTATE(u32, smbase, 0x7e64);
-       dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
+       dt.size =                   GET_SMSTATE(u32, smstate, 0x7e64);
+       dt.address =                GET_SMSTATE(u64, smstate, 0x7e68);
        ctxt->ops->set_gdt(ctxt, &dt);
 
        r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
@@ -2553,37 +2554,49 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
                return r;
 
        for (i = 0; i < 6; i++) {
-               r = rsm_load_seg_64(ctxt, smbase, i);
+               r = rsm_load_seg_64(ctxt, smstate, i);
                if (r != X86EMUL_CONTINUE)
                        return r;
        }
 
        return X86EMUL_CONTINUE;
 }
+#endif
 
 static int em_rsm(struct x86_emulate_ctxt *ctxt)
 {
        unsigned long cr0, cr4, efer;
+       char buf[512];
        u64 smbase;
        int ret;
 
        if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
                return emulate_ud(ctxt);
 
+       smbase = ctxt->ops->get_smbase(ctxt);
+
+       ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
+       if (ret != X86EMUL_CONTINUE)
+               return X86EMUL_UNHANDLEABLE;
+
+       if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
+               ctxt->ops->set_nmi_mask(ctxt, false);
+
+       ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+               ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
+
        /*
         * Get back to real mode, to prepare a safe state in which to load
         * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
         * supports long mode.
         */
-       cr4 = ctxt->ops->get_cr(ctxt, 4);
        if (emulator_has_longmode(ctxt)) {
                struct desc_struct cs_desc;
 
                /* Zero CR4.PCIDE before CR0.PG.  */
-               if (cr4 & X86_CR4_PCIDE) {
+               cr4 = ctxt->ops->get_cr(ctxt, 4);
+               if (cr4 & X86_CR4_PCIDE)
                        ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
-                       cr4 &= ~X86_CR4_PCIDE;
-               }
 
                /* A 32-bit code segment is required to clear EFER.LMA.  */
                memset(&cs_desc, 0, sizeof(cs_desc));
@@ -2597,39 +2610,39 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
        if (cr0 & X86_CR0_PE)
                ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
 
-       /* Now clear CR4.PAE (which must be done before clearing EFER.LME).  */
-       if (cr4 & X86_CR4_PAE)
-               ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
-
-       /* And finally go back to 32-bit mode.  */
-       efer = 0;
-       ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
+       if (emulator_has_longmode(ctxt)) {
+               /* Clear CR4.PAE before clearing EFER.LME. */
+               cr4 = ctxt->ops->get_cr(ctxt, 4);
+               if (cr4 & X86_CR4_PAE)
+                       ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
 
-       smbase = ctxt->ops->get_smbase(ctxt);
+               /* And finally go back to 32-bit mode.  */
+               efer = 0;
+               ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
+       }
 
        /*
         * Give pre_leave_smm() a chance to make ISA-specific changes to the
         * vCPU state (e.g. enter guest mode) before loading state from the SMM
         * state-save area.
         */
-       if (ctxt->ops->pre_leave_smm(ctxt, smbase))
+       if (ctxt->ops->pre_leave_smm(ctxt, buf))
                return X86EMUL_UNHANDLEABLE;
 
+#ifdef CONFIG_X86_64
        if (emulator_has_longmode(ctxt))
-               ret = rsm_load_state_64(ctxt, smbase + 0x8000);
+               ret = rsm_load_state_64(ctxt, buf);
        else
-               ret = rsm_load_state_32(ctxt, smbase + 0x8000);
+#endif
+               ret = rsm_load_state_32(ctxt, buf);
 
        if (ret != X86EMUL_CONTINUE) {
                /* FIXME: should triple fault */
                return X86EMUL_UNHANDLEABLE;
        }
 
-       if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
-               ctxt->ops->set_nmi_mask(ctxt, false);
+       ctxt->ops->post_leave_smm(ctxt);
 
-       ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
-               ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
        return X86EMUL_CONTINUE;
 }
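
em_rsm() now reads the whole 512-byte SMRAM state-save area (smbase + 0xfe00, covering window offsets 0x7e00-0x7fff) into a local buffer up front, so the rsm_load_* helpers take a cached `const char *smstate` instead of issuing one read_phys() per field. The replacement GET_SMSTATE() is defined outside this hunk; a plausible buffer-based sketch, consistent with the offsets the callers pass:

/*
 * Hypothetical buffer-based replacement; the real macro is defined
 * outside this hunk. Callers pass window offsets 0x7e00-0x7fff, and
 * 'buf' holds the 512 bytes read from smbase + 0xfe00.
 */
#include <string.h>

#define GET_SMSTATE(type, buf, offset)					\
({									\
	type __val;							\
									\
	memcpy(&__val, (buf) + (offset) - 0x7e00, sizeof(__val));	\
	__val;								\
})
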
 
index 27c43525a05f1afabeb705b27f955eba5fe5356d..cc24b3a32c449d01bda073f3bb5a5f5e245440a4 100644 (file)
@@ -526,7 +526,9 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
                new_config.enable = 0;
        stimer->config.as_uint64 = new_config.as_uint64;
 
-       stimer_mark_pending(stimer, false);
+       if (stimer->config.enable)
+               stimer_mark_pending(stimer, false);
+
        return 0;
 }
 
@@ -542,7 +544,10 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
                stimer->config.enable = 0;
        else if (stimer->config.auto_enable)
                stimer->config.enable = 1;
-       stimer_mark_pending(stimer, false);
+
+       if (stimer->config.enable)
+               stimer_mark_pending(stimer, false);
+
        return 0;
 }
 
@@ -1366,7 +1371,16 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
 
                valid_bank_mask = BIT_ULL(0);
                sparse_banks[0] = flush.processor_mask;
-               all_cpus = flush.flags & HV_FLUSH_ALL_PROCESSORS;
+
+               /*
+                * Work around possible WS2012 bug: it sends hypercalls
+                * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
+                * while also expecting us to flush something and crashing if
+                * we don't. Let's treat processor_mask == 0 the same as
+                * HV_FLUSH_ALL_PROCESSORS.
+                */
+               all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
+                       flush.processor_mask == 0;
        } else {
                if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
                                            sizeof(flush_ex))))
index 991fdf7fc17fbd9e1a4cab99d688a7af820d397c..bd13fdddbdc4a98782e4c94d2a6403b19e2f9956 100644 (file)
@@ -70,7 +70,6 @@
 #define APIC_BROADCAST                 0xFF
 #define X2APIC_BROADCAST               0xFFFFFFFFul
 
-static bool lapic_timer_advance_adjust_done = false;
 #define LAPIC_TIMER_ADVANCE_ADJUST_DONE 100
 /* step-by-step approximation to mitigate fluctuation */
 #define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
@@ -138,6 +137,7 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
                if (offset <= max_apic_id) {
                        u8 cluster_size = min(max_apic_id - offset + 1, 16U);
 
+                       offset = array_index_nospec(offset, map->max_apic_id + 1);
                        *cluster = &map->phys_map[offset];
                        *mask = dest_id & (0xffff >> (16 - cluster_size));
                } else {
@@ -901,7 +901,8 @@ static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
                if (irq->dest_id > map->max_apic_id) {
                        *bitmap = 0;
                } else {
-                       *dst = &map->phys_map[irq->dest_id];
+                       u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
+                       *dst = &map->phys_map[dest_id];
                        *bitmap = 1;
                }
                return true;
@@ -1480,14 +1481,32 @@ static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
        return false;
 }
 
+static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
+{
+       u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;
+
+       /*
+        * If the guest TSC is running at a different ratio than the host, then
+        * convert the delay to nanoseconds to achieve an accurate delay.  Note
+        * that __delay() uses delay_tsc whenever the hardware has TSC, thus
+        * always on VMX-enabled hardware.
+        */
+       if (vcpu->arch.tsc_scaling_ratio == kvm_default_tsc_scaling_ratio) {
+               __delay(min(guest_cycles,
+                       nsec_to_cycles(vcpu, timer_advance_ns)));
+       } else {
+               u64 delay_ns = guest_cycles * 1000000ULL;
+               do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
+               ndelay(min_t(u32, delay_ns, timer_advance_ns));
+       }
+}
+
 void wait_lapic_expire(struct kvm_vcpu *vcpu)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
+       u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
        u64 guest_tsc, tsc_deadline, ns;
 
-       if (!lapic_in_kernel(vcpu))
-               return;
-
        if (apic->lapic_timer.expired_tscdeadline == 0)
                return;
 
@@ -1499,33 +1518,37 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
        guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
        trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
 
-       /* __delay is delay_tsc whenever the hardware has TSC, thus always.  */
        if (guest_tsc < tsc_deadline)
-               __delay(min(tsc_deadline - guest_tsc,
-                       nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
+               __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
 
-       if (!lapic_timer_advance_adjust_done) {
+       if (!apic->lapic_timer.timer_advance_adjust_done) {
                /* too early */
                if (guest_tsc < tsc_deadline) {
                        ns = (tsc_deadline - guest_tsc) * 1000000ULL;
                        do_div(ns, vcpu->arch.virtual_tsc_khz);
-                       lapic_timer_advance_ns -= min((unsigned int)ns,
-                               lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
+                       timer_advance_ns -= min((u32)ns,
+                               timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
                } else {
                /* too late */
                        ns = (guest_tsc - tsc_deadline) * 1000000ULL;
                        do_div(ns, vcpu->arch.virtual_tsc_khz);
-                       lapic_timer_advance_ns += min((unsigned int)ns,
-                               lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
+                       timer_advance_ns += min((u32)ns,
+                               timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
                }
                if (abs(guest_tsc - tsc_deadline) < LAPIC_TIMER_ADVANCE_ADJUST_DONE)
-                       lapic_timer_advance_adjust_done = true;
+                       apic->lapic_timer.timer_advance_adjust_done = true;
+               if (unlikely(timer_advance_ns > 5000)) {
+                       timer_advance_ns = 0;
+                       apic->lapic_timer.timer_advance_adjust_done = true;
+               }
+               apic->lapic_timer.timer_advance_ns = timer_advance_ns;
        }
 }
 
 static void start_sw_tscdeadline(struct kvm_lapic *apic)
 {
-       u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
+       struct kvm_timer *ktimer = &apic->lapic_timer;
+       u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
        u64 ns = 0;
        ktime_t expire;
        struct kvm_vcpu *vcpu = apic->vcpu;
@@ -1540,13 +1563,15 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
 
        now = ktime_get();
        guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
-       if (likely(tscdeadline > guest_tsc)) {
-               ns = (tscdeadline - guest_tsc) * 1000000ULL;
-               do_div(ns, this_tsc_khz);
+
+       ns = (tscdeadline - guest_tsc) * 1000000ULL;
+       do_div(ns, this_tsc_khz);
+
+       if (likely(tscdeadline > guest_tsc) &&
+           likely(ns > apic->lapic_timer.timer_advance_ns)) {
                expire = ktime_add_ns(now, ns);
-               expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
-               hrtimer_start(&apic->lapic_timer.timer,
-                               expire, HRTIMER_MODE_ABS_PINNED);
+               expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
+               hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_PINNED);
        } else
                apic_timer_expired(apic);
 
@@ -2253,7 +2278,7 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
                return HRTIMER_NORESTART;
 }
 
-int kvm_create_lapic(struct kvm_vcpu *vcpu)
+int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
 {
        struct kvm_lapic *apic;
 
@@ -2277,6 +2302,14 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
        hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_ABS_PINNED);
        apic->lapic_timer.timer.function = apic_timer_fn;
+       if (timer_advance_ns == -1) {
+               apic->lapic_timer.timer_advance_ns = 1000;
+               apic->lapic_timer.timer_advance_adjust_done = false;
+       } else {
+               apic->lapic_timer.timer_advance_ns = timer_advance_ns;
+               apic->lapic_timer.timer_advance_adjust_done = true;
+       }
+
 
        /*
         * APIC is created enabled. This will prevent kvm_lapic_set_base from
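
The lapic.c changes (truncated above) move the timer-advance tuning from a global into struct kvm_timer, so each vCPU converges independently: adjust by at most 1/8 of the current value per sample, stop once the error drops under the "done" threshold, and reset to 0 if the value runs away past 5000 ns. A standalone sketch of that adaptation step, with constants mirroring the hunk:

/*
 * Standalone sketch of the per-timer adaptation above; error_ns is the
 * observed deadline error (negative = fired too early).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define ADJUST_STEP 8		/* LAPIC_TIMER_ADVANCE_ADJUST_STEP */
#define ADJUST_DONE 100		/* LAPIC_TIMER_ADVANCE_ADJUST_DONE */

struct timer_state {
	uint32_t advance_ns;
	bool adjust_done;
};

static void adapt(struct timer_state *t, int64_t error_ns)
{
	uint32_t step = t->advance_ns / ADJUST_STEP;
	uint32_t delta = (uint32_t)llabs(error_ns);

	if (t->adjust_done)
		return;
	if (error_ns < 0)		/* too early: back off the advance */
		t->advance_ns -= delta < step ? delta : step;
	else				/* too late: advance a bit more */
		t->advance_ns += delta < step ? delta : step;

	if (llabs(error_ns) < ADJUST_DONE)
		t->adjust_done = true;
	if (t->advance_ns > 5000) {	/* runaway value: stop advancing */
		t->advance_ns = 0;
		t->adjust_done = true;
	}
}
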
index ff6ef9c3d760c7d6db6d5ee86d1a0bb21c1c63b5..d6d049ba304526be2974b2c4228b3a70420adbe7 100644 (file)
@@ -31,8 +31,10 @@ struct kvm_timer {
        u32 timer_mode_mask;
        u64 tscdeadline;
        u64 expired_tscdeadline;
+       u32 timer_advance_ns;
        atomic_t pending;                       /* accumulated triggered timers */
        bool hv_timer_in_use;
+       bool timer_advance_adjust_done;
 };
 
 struct kvm_lapic {
@@ -62,7 +64,7 @@ struct kvm_lapic {
 
 struct dest_map;
 
-int kvm_create_lapic(struct kvm_vcpu *vcpu);
+int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns);
 void kvm_free_lapic(struct kvm_vcpu *vcpu);
 
 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
index 7837ab001d806f2f3ffd3a56e30d1bae916f7b03..d9c7b45d231f1582becb071ae6355fc7c63bc79c 100644 (file)
@@ -182,7 +182,7 @@ struct kvm_shadow_walk_iterator {
 
 static const union kvm_mmu_page_role mmu_base_role_mask = {
        .cr0_wp = 1,
-       .cr4_pae = 1,
+       .gpte_is_8_bytes = 1,
        .nxe = 1,
        .smep_andnot_wp = 1,
        .smap_andnot_wp = 1,
@@ -2007,7 +2007,7 @@ static int is_empty_shadow_page(u64 *spt)
  * aggregate version in order to make the slab shrinker
  * faster
  */
-static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
+static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
 {
        kvm->arch.n_used_mmu_pages += nr;
        percpu_counter_add(&kvm_total_used_mmu_pages, nr);
@@ -2205,6 +2205,7 @@ static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                                    struct list_head *invalid_list);
 
+
 #define for_each_valid_sp(_kvm, _sp, _gfn)                             \
        hlist_for_each_entry(_sp,                                       \
          &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
@@ -2215,12 +2216,17 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
        for_each_valid_sp(_kvm, _sp, _gfn)                              \
                if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
 
+static inline bool is_ept_sp(struct kvm_mmu_page *sp)
+{
+       return sp->role.cr0_wp && sp->role.smap_andnot_wp;
+}
+
 /* @sp->gfn should be write-protected at the call site */
 static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                            struct list_head *invalid_list)
 {
-       if (sp->role.cr4_pae != !!is_pae(vcpu)
-           || vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
+       if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) ||
+           vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
                kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
                return false;
        }
@@ -2232,7 +2238,7 @@ static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
                                        struct list_head *invalid_list,
                                        bool remote_flush)
 {
-       if (!remote_flush && !list_empty(invalid_list))
+       if (!remote_flush && list_empty(invalid_list))
                return false;
 
        if (!list_empty(invalid_list))
@@ -2423,7 +2429,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        role.level = level;
        role.direct = direct;
        if (role.direct)
-               role.cr4_pae = 0;
+               role.gpte_is_8_bytes = true;
        role.access = access;
        if (!vcpu->arch.mmu->direct_map
            && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
@@ -2757,7 +2763,7 @@ static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
  * Changing the number of mmu pages allocated to the vm
  * Note: if goal_nr_mmu_pages is too small, you will get dead lock
  */
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
 {
        LIST_HEAD(invalid_list);
 
@@ -4775,6 +4781,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
        union kvm_mmu_extended_role ext = {0};
 
        ext.cr0_pg = !!is_paging(vcpu);
+       ext.cr4_pae = !!is_pae(vcpu);
        ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
        ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
        ext.cr4_pse = !!is_pse(vcpu);
@@ -4794,7 +4801,6 @@ static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
 
        role.base.access = ACC_ALL;
        role.base.nxe = !!is_nx(vcpu);
-       role.base.cr4_pae = !!is_pae(vcpu);
        role.base.cr0_wp = is_write_protection(vcpu);
        role.base.smm = is_smm(vcpu);
        role.base.guest_mode = is_guest_mode(vcpu);
@@ -4815,6 +4821,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
        role.base.ad_disabled = (shadow_accessed_mask == 0);
        role.base.level = kvm_x86_ops->get_tdp_level(vcpu);
        role.base.direct = true;
+       role.base.gpte_is_8_bytes = true;
 
        return role;
 }
@@ -4879,6 +4886,7 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
        role.base.smap_andnot_wp = role.ext.cr4_smap &&
                !is_write_protection(vcpu);
        role.base.direct = !is_paging(vcpu);
+       role.base.gpte_is_8_bytes = !!is_pae(vcpu);
 
        if (!is_long_mode(vcpu))
                role.base.level = PT32E_ROOT_LEVEL;
@@ -4918,18 +4926,26 @@ static union kvm_mmu_role
 kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
                                   bool execonly)
 {
-       union kvm_mmu_role role;
+       union kvm_mmu_role role = {0};
 
-       /* Base role is inherited from root_mmu */
-       role.base.word = vcpu->arch.root_mmu.mmu_role.base.word;
-       role.ext = kvm_calc_mmu_role_ext(vcpu);
+       /* SMM flag is inherited from root_mmu */
+       role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
 
        role.base.level = PT64_ROOT_4LEVEL;
+       role.base.gpte_is_8_bytes = true;
        role.base.direct = false;
        role.base.ad_disabled = !accessed_dirty;
        role.base.guest_mode = true;
        role.base.access = ACC_ALL;
 
+       /*
+        * WP=1 and NOT_WP=1 is an impossible combination, use WP and the
+        * SMAP variation to denote shadow EPT entries.
+        */
+       role.base.cr0_wp = true;
+       role.base.smap_andnot_wp = true;
+
+       role.ext = kvm_calc_mmu_role_ext(vcpu);
        role.ext.execonly = execonly;
 
        return role;
@@ -5179,7 +5195,7 @@ static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
                 gpa, bytes, sp->role.word);
 
        offset = offset_in_page(gpa);
-       pte_size = sp->role.cr4_pae ? 8 : 4;
+       pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
 
        /*
         * Sometimes, the OS only writes the last one bytes to update status
@@ -5203,7 +5219,7 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
        page_offset = offset_in_page(gpa);
        level = sp->role.level;
        *nspte = 1;
-       if (!sp->role.cr4_pae) {
+       if (!sp->role.gpte_is_8_bytes) {
                page_offset <<= 1;      /* 32->64 */
                /*
                 * A 32-bit pde maps 4MB while the shadow pdes map
@@ -5393,10 +5409,12 @@ emulate:
         * This can happen if a guest gets a page-fault on data access but the HW
         * table walker is not able to read the instruction page (e.g instruction
         * page is not present in memory). In those cases we simply restart the
-        * guest.
+        * guest, with the exception of AMD Erratum 1096 which is unrecoverable.
         */
-       if (unlikely(insn && !insn_len))
-               return 1;
+       if (unlikely(insn && !insn_len)) {
+               if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu))
+                       return 1;
+       }
 
        er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
 
@@ -5509,7 +5527,9 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
 
                if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
                        if (flush && lock_flush_tlb) {
-                               kvm_flush_remote_tlbs(kvm);
+                               kvm_flush_remote_tlbs_with_address(kvm,
+                                               start_gfn,
+                                               iterator.gfn - start_gfn + 1);
                                flush = false;
                        }
                        cond_resched_lock(&kvm->mmu_lock);
@@ -5517,7 +5537,8 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
        }
 
        if (flush && lock_flush_tlb) {
-               kvm_flush_remote_tlbs(kvm);
+               kvm_flush_remote_tlbs_with_address(kvm, start_gfn,
+                                                  end_gfn - start_gfn + 1);
                flush = false;
        }
 
@@ -6011,10 +6032,10 @@ out:
 /*
  * Calculate mmu pages needed for kvm.
  */
-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
+unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
 {
-       unsigned int nr_mmu_pages;
-       unsigned int  nr_pages = 0;
+       unsigned long nr_mmu_pages;
+       unsigned long nr_pages = 0;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int i;
@@ -6027,8 +6048,7 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
        }
 
        nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
-       nr_mmu_pages = max(nr_mmu_pages,
-                          (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
+       nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
 
        return nr_mmu_pages;
 }
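
The mmu.c hunks widen the page-count arithmetic to unsigned long, rename cr4_pae to gpte_is_8_bytes, and tag shadow-EPT pages with the deliberately impossible cr0_wp + smap_andnot_wp combination so is_ept_sp() can recognize them. A tiny sketch of that role-encoding trick, with a stand-in bitfield instead of kvm_mmu_page_role:

/*
 * Sketch with a stand-in bitfield (not kvm_mmu_page_role): smap_andnot_wp
 * means "SMAP active AND CR0.WP clear", so no real guest role can have it
 * set together with cr0_wp, leaving the pair free to mark shadow-EPT pages.
 */
#include <stdbool.h>
#include <stdint.h>

struct role_stub {
	uint32_t cr0_wp : 1;
	uint32_t smap_andnot_wp : 1;
	uint32_t gpte_is_8_bytes : 1;
};

static bool is_ept_sp_sketch(const struct role_stub *role)
{
	return role->cr0_wp && role->smap_andnot_wp;
}
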
index bbdc60f2fae89beb34c72716d9e7eb9c33584651..54c2a377795be6920bee9676e58555110c3a56b9 100644 (file)
@@ -64,7 +64,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
                                u64 fault_address, char *insn, int insn_len);
 
-static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
+static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
 {
        if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
                return kvm->arch.n_max_mmu_pages -
index 9f6c855a00439a58f5c4ea58e099d935d65e22d9..dd30dccd2ad5e250aef10e889e150011fece3468 100644 (file)
                                                                        \
        role.word = __entry->role;                                      \
                                                                        \
-       trace_seq_printf(p, "sp gfn %llx l%u%s q%u%s %s%s"              \
+       trace_seq_printf(p, "sp gfn %llx l%u %u-byte q%u%s %s%s"        \
                         " %snxe %sad root %u %s%c",                    \
                         __entry->gfn, role.level,                      \
-                        role.cr4_pae ? " pae" : "",                    \
+                        role.gpte_is_8_bytes ? 8 : 4,                  \
                         role.quadrant,                                 \
                         role.direct ? " direct" : "",                  \
                         access_str[role.access],                       \
index 58ead7db71a312764b56d9f242e84820239eeb93..e39741997893a977fdda077ff637bf465fbb1748 100644 (file)
@@ -281,9 +281,13 @@ static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 {
        bool fast_mode = idx & (1u << 31);
+       struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u64 ctr_val;
 
+       if (!pmu->version)
+               return 1;
+
        if (is_vmware_backdoor_pmc(idx))
                return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
 
index b5b128a0a05124d275af1f103fe3e40315df80f5..406b558abfef7379eb46bd2de18e5d6890079eb9 100644 (file)
@@ -262,6 +262,7 @@ struct amd_svm_iommu_ir {
 };
 
 #define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK   (0xFF)
+#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT                        31
 #define AVIC_LOGICAL_ID_ENTRY_VALID_MASK               (1 << 31)
 
 #define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK   (0xFFULL)
@@ -2692,6 +2693,7 @@ static int npf_interception(struct vcpu_svm *svm)
 static int db_interception(struct vcpu_svm *svm)
 {
        struct kvm_run *kvm_run = svm->vcpu.run;
+       struct kvm_vcpu *vcpu = &svm->vcpu;
 
        if (!(svm->vcpu.guest_debug &
              (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
@@ -2702,6 +2704,8 @@ static int db_interception(struct vcpu_svm *svm)
 
        if (svm->nmi_singlestep) {
                disable_nmi_singlestep(svm);
+               /* Make sure we check for pending NMIs upon entry */
+               kvm_make_request(KVM_REQ_EVENT, vcpu);
        }
 
        if (svm->vcpu.guest_debug &
@@ -4517,14 +4521,25 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
                kvm_lapic_reg_write(apic, APIC_ICR, icrl);
                break;
        case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
+               int i;
+               struct kvm_vcpu *vcpu;
+               struct kvm *kvm = svm->vcpu.kvm;
                struct kvm_lapic *apic = svm->vcpu.arch.apic;
 
                /*
-                * Update ICR high and low, then emulate sending IPI,
-                * which is handled when writing APIC_ICR.
+                * At this point, we expect that the AVIC HW has already
+                * set the appropriate IRR bits on the valid target
+                * vcpus. So, we just need to kick the appropriate vcpu.
                 */
-               kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
-               kvm_lapic_reg_write(apic, APIC_ICR, icrl);
+               kvm_for_each_vcpu(i, vcpu, kvm) {
+                       bool m = kvm_apic_match_dest(vcpu, apic,
+                                                    icrl & KVM_APIC_SHORT_MASK,
+                                                    GET_APIC_DEST_FIELD(icrh),
+                                                    icrl & KVM_APIC_DEST_MASK);
+
+                       if (m && !avic_vcpu_is_running(vcpu))
+                               kvm_vcpu_wake_up(vcpu);
+               }
                break;
        }
        case AVIC_IPI_FAILURE_INVALID_TARGET:
@@ -4596,7 +4611,7 @@ static void avic_invalidate_logical_id_entry(struct kvm_vcpu *vcpu)
        u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat);
 
        if (entry)
-               WRITE_ONCE(*entry, (u32) ~AVIC_LOGICAL_ID_ENTRY_VALID_MASK);
+               clear_bit(AVIC_LOGICAL_ID_ENTRY_VALID_BIT, (unsigned long *)entry);
 }
 
 static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
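
One detail of the avic_invalidate_logical_id_entry() hunk above: the old
WRITE_ONCE stored (u32)~AVIC_LOGICAL_ID_ENTRY_VALID_MASK, i.e. 0x7fffffff,
which cleared the valid bit but also set every other bit of the entry,
clobbering the guest-physical-ID field.  Atomically clearing only bit 31
preserves the rest.  A standalone sketch, with a GCC atomic builtin
standing in for the kernel's clear_bit():

    #include <stdint.h>

    #define VALID_BIT 31

    static void invalidate_entry(uint32_t *entry)
    {
            /* old: *entry = ~(UINT32_C(1) << VALID_BIT);  -- sets bits 0..30 */
            __atomic_fetch_and(entry, ~(UINT32_C(1) << VALID_BIT),
                               __ATOMIC_RELAXED);
    }
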
@@ -5621,6 +5636,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
        clgi();
+       kvm_load_guest_xcr0(vcpu);
 
        /*
         * If this vCPU has touched SPEC_CTRL, restore the guest's value if
@@ -5766,6 +5782,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
                kvm_before_interrupt(&svm->vcpu);
 
+       kvm_put_guest_xcr0(vcpu);
        stgi();
 
        /* Any pending NMI will happen here */
@@ -6215,32 +6232,24 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
        return 0;
 }
 
-static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *nested_vmcb;
        struct page *page;
-       struct {
-               u64 guest;
-               u64 vmcb;
-       } svm_state_save;
-       int ret;
+       u64 guest;
+       u64 vmcb;
 
-       ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfed8, &svm_state_save,
-                                 sizeof(svm_state_save));
-       if (ret)
-               return ret;
+       guest = GET_SMSTATE(u64, smstate, 0x7ed8);
+       vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
 
-       if (svm_state_save.guest) {
-               vcpu->arch.hflags &= ~HF_SMM_MASK;
-               nested_vmcb = nested_svm_map(svm, svm_state_save.vmcb, &page);
-               if (nested_vmcb)
-                       enter_svm_guest_mode(svm, svm_state_save.vmcb, nested_vmcb, page);
-               else
-                       ret = 1;
-               vcpu->arch.hflags |= HF_SMM_MASK;
+       if (guest) {
+               nested_vmcb = nested_svm_map(svm, vmcb, &page);
+               if (!nested_vmcb)
+                       return 1;
+               enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
        }
-       return ret;
+       return 0;
 }
 
 static int enable_smi_window(struct kvm_vcpu *vcpu)
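
The svm_pre_leave_smm() rewrite above stops issuing a guest-memory read at
smbase + 0xfed8 and instead pulls the saved fields out of the SMRAM
snapshot the emulator already captured, via GET_SMSTATE at offsets 0x7ed8
and 0x7ee0.  A sketch of such a fixed-offset accessor; the memcpy-based
macro and the 0x7e00 rebasing constant are illustrative assumptions here,
not KVM's exact definition:

    #include <stdint.h>
    #include <string.h>

    #define SMRAM_BASE 0x7e00   /* snapshot assumed to cover 0x7e00..0x7fff */

    #define SMSTATE_GET(type, buf, off)                                  \
    ({                                                                   \
            type __val;                                                  \
            memcpy(&__val, (buf) + ((off) - SMRAM_BASE), sizeof(__val)); \
            __val;                                                       \
    })

    static uint64_t read_saved_vmcb_pa(const char *smstate)
    {
            return SMSTATE_GET(uint64_t, smstate, 0x7ee0);  /* as in the hunk */
    }
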
@@ -6422,11 +6431,11 @@ e_free:
        return ret;
 }
 
-static int get_num_contig_pages(int idx, struct page **inpages,
-                               unsigned long npages)
+static unsigned long get_num_contig_pages(unsigned long idx,
+                               struct page **inpages, unsigned long npages)
 {
        unsigned long paddr, next_paddr;
-       int i = idx + 1, pages = 1;
+       unsigned long i = idx + 1, pages = 1;
 
        /* find the number of contiguous pages starting from idx */
        paddr = __sme_page_pa(inpages[idx]);
@@ -6445,12 +6454,12 @@ static int get_num_contig_pages(int idx, struct page **inpages,
 
 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
 {
-       unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
+       unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct kvm_sev_launch_update_data params;
        struct sev_data_launch_update_data *data;
        struct page **inpages;
-       int i, ret, pages;
+       int ret;
 
        if (!sev_guest(kvm))
                return -ENOTTY;
@@ -6799,7 +6808,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
        struct page **src_p, **dst_p;
        struct kvm_sev_dbg debug;
        unsigned long n;
-       int ret, size;
+       unsigned int size;
+       int ret;
 
        if (!sev_guest(kvm))
                return -ENOTTY;
@@ -6807,6 +6817,11 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
        if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
                return -EFAULT;
 
+       if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
+               return -EINVAL;
+       if (!debug.dst_uaddr)
+               return -EINVAL;
+
        vaddr = debug.src_uaddr;
        size = debug.len;
        vaddr_end = vaddr + size;
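
The two checks added above reject a zero-length request and a source range
whose uaddr + len wraps around the address space; without them the copy
loop could start with vaddr_end < vaddr.  The same overflow-safe
validation, reduced to a standalone predicate:

    #include <stdbool.h>
    #include <stdint.h>

    static bool user_range_ok(uint64_t uaddr, uint64_t len)
    {
            /* non-zero length, and uaddr + len does not wrap */
            return len != 0 && uaddr + len >= uaddr;
    }
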
@@ -6857,8 +6872,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
                                                     dst_vaddr,
                                                     len, &argp->error);
 
-               sev_unpin_memory(kvm, src_p, 1);
-               sev_unpin_memory(kvm, dst_p, 1);
+               sev_unpin_memory(kvm, src_p, n);
+               sev_unpin_memory(kvm, dst_p, n);
 
                if (ret)
                        goto err;
@@ -7098,6 +7113,36 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
        return -ENODEV;
 }
 
+static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
+{
+       bool is_user, smap;
+
+       is_user = svm_get_cpl(vcpu) == 3;
+       smap = !kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
+
+       /*
+        * Detect and work around Erratum 1096 (Fam_17h_00_0Fh).
+        *
+        * In a non-SEV guest, the hypervisor can read guest memory to decode
+        * the faulting instruction when insn_len is zero, so return true to
+        * indicate that decoding is possible.
+        *
+        * But in an SEV guest, guest memory is encrypted with a guest-specific
+        * key and the hypervisor cannot decode the instruction, so the erratum
+        * cannot be worked around.  Print an error and request that the guest
+        * be killed.
+        */
+       if (is_user && smap) {
+               if (!sev_guest(vcpu->kvm))
+                       return true;
+
+               pr_err_ratelimited("KVM: Guest triggered AMD Erratum 1096\n");
+               kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+       }
+
+       return false;
+}
+
 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .cpu_has_kvm_support = has_svm,
        .disabled_by_bios = is_disabled,
@@ -7231,6 +7276,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 
        .nested_enable_evmcs = nested_enable_evmcs,
        .nested_get_evmcs_version = nested_get_evmcs_version,
+
+       .need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
 };
 
 static int __init svm_init(void)
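
Tying this file's erratum handling together: the new
.need_emulation_on_page_fault hook tells the common page-fault path (see
the mmu.c hunk earlier) whether a zero-length instruction fetch can still
be emulated.  A condensed sketch of the decision; the boolean parameters
abstract the CPL/SMAP and SEV tests from the hunks above:

    #include <stdbool.h>

    /* true  -> KVM should try to fetch and decode the instruction itself
     * false -> just re-enter the guest (or, for SEV, the guest is dead) */
    static bool need_emulation_on_page_fault(bool erratum_conditions_met,
                                             bool is_sev_guest)
    {
            if (!erratum_conditions_met)
                    return false;   /* normal path: restart the guest */
            if (!is_sev_guest)
                    return true;    /* host can read guest memory and decode */
            return false;           /* encrypted memory: cannot decode */
    }
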
index 6432d08c7de79ccbde654b7ab17c9649b75a25c2..4d47a2631d1fb46d9f913b59743cb5417d7401c6 100644 (file)
@@ -438,13 +438,13 @@ TRACE_EVENT(kvm_apic_ipi,
 );
 
 TRACE_EVENT(kvm_apic_accept_irq,
-           TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec),
+           TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
            TP_ARGS(apicid, dm, tm, vec),
 
        TP_STRUCT__entry(
                __field(        __u32,          apicid          )
                __field(        __u16,          dm              )
-               __field(        __u8,           tm              )
+               __field(        __u16,          tm              )
                __field(        __u8,           vec             )
        ),
 
index f24a2c2250706f24741e4503ed5ba60232b3613e..0c601d079cd20e4975f58c0f4fca35c36abbc9f9 100644 (file)
@@ -500,6 +500,17 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
        }
 }
 
+static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) {
+       int msr;
+
+       for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+               unsigned word = msr / BITS_PER_LONG;
+
+               msr_bitmap[word] = ~0;
+               msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
+       }
+}
+
 /*
  * Merge L0's and L1's MSR bitmap, return false to indicate that
  * we do not use the hardware.
@@ -541,39 +552,44 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
                return false;
 
        msr_bitmap_l1 = (unsigned long *)kmap(page);
-       if (nested_cpu_has_apic_reg_virt(vmcs12)) {
-               /*
-                * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it
-                * just lets the processor take the value from the virtual-APIC page;
-                * take those 256 bits directly from the L1 bitmap.
-                */
-               for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
-                       unsigned word = msr / BITS_PER_LONG;
-                       msr_bitmap_l0[word] = msr_bitmap_l1[word];
-                       msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
-               }
-       } else {
-               for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
-                       unsigned word = msr / BITS_PER_LONG;
-                       msr_bitmap_l0[word] = ~0;
-                       msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
-               }
-       }
 
-       nested_vmx_disable_intercept_for_msr(
-               msr_bitmap_l1, msr_bitmap_l0,
-               X2APIC_MSR(APIC_TASKPRI),
-               MSR_TYPE_W);
+       /*
+        * To keep the control flow simple, pay eight 8-byte writes (sixteen
+        * 4-byte writes on 32-bit systems) up front to enable intercepts for
+        * the x2APIC MSR range and selectively disable them below.
+        */
+       enable_x2apic_msr_intercepts(msr_bitmap_l0);
+
+       if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
+               if (nested_cpu_has_apic_reg_virt(vmcs12)) {
+                       /*
+                        * L0 need not intercept reads for MSRs between 0x800
+                        * and 0x8ff, it just lets the processor take the value
+                        * from the virtual-APIC page; take those 256 bits
+                        * directly from the L1 bitmap.
+                        */
+                       for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+                               unsigned word = msr / BITS_PER_LONG;
+
+                               msr_bitmap_l0[word] = msr_bitmap_l1[word];
+                       }
+               }
 
-       if (nested_cpu_has_vid(vmcs12)) {
-               nested_vmx_disable_intercept_for_msr(
-                       msr_bitmap_l1, msr_bitmap_l0,
-                       X2APIC_MSR(APIC_EOI),
-                       MSR_TYPE_W);
                nested_vmx_disable_intercept_for_msr(
                        msr_bitmap_l1, msr_bitmap_l0,
-                       X2APIC_MSR(APIC_SELF_IPI),
-                       MSR_TYPE_W);
+                       X2APIC_MSR(APIC_TASKPRI),
+                       MSR_TYPE_R | MSR_TYPE_W);
+
+               if (nested_cpu_has_vid(vmcs12)) {
+                       nested_vmx_disable_intercept_for_msr(
+                               msr_bitmap_l1, msr_bitmap_l0,
+                               X2APIC_MSR(APIC_EOI),
+                               MSR_TYPE_W);
+                       nested_vmx_disable_intercept_for_msr(
+                               msr_bitmap_l1, msr_bitmap_l0,
+                               X2APIC_MSR(APIC_SELF_IPI),
+                               MSR_TYPE_W);
+               }
        }
 
        if (spec_ctrl)
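
For reference on the index arithmetic in this hunk: in the 4 KiB VMX MSR
bitmap, the read-intercept bits for low MSRs (0x0-0x1fff) occupy the first
1 KiB and the matching write-intercept bits start at byte offset 0x800,
which is where the `word + (0x800 / sizeof(long))` companion index comes
from.  A small sketch computing both word indices for one x2APIC MSR
(helper names are local; the layout follows the Intel SDM):

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    int main(void)
    {
            unsigned int msr = 0x808;       /* x2APIC TASKPRI */
            unsigned long rd_word = msr / BITS_PER_LONG;
            unsigned long wr_word = rd_word + 0x800 / sizeof(unsigned long);

            printf("read: word %lu bit %lu, write: word %lu\n",
                   rd_word, (unsigned long)(msr % BITS_PER_LONG), wr_word);
            return 0;
    }
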
@@ -2585,6 +2601,11 @@ static int nested_check_host_control_regs(struct kvm_vcpu *vcpu,
            !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
            !nested_cr3_valid(vcpu, vmcs12->host_cr3))
                return -EINVAL;
+
+       if (is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu) ||
+           is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))
+               return -EINVAL;
+
        /*
         * If the load IA32_EFER VM-exit control is 1, bits reserved in the
         * IA32_EFER MSR must be 0 in the field for that register. In addition,
@@ -2852,20 +2873,27 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
                /*
                 * If translation failed, VM entry will fail because
                 * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull.
-                * Failing the vm entry is _not_ what the processor
-                * does but it's basically the only possibility we
-                * have.  We could still enter the guest if CR8 load
-                * exits are enabled, CR8 store exits are enabled, and
-                * virtualize APIC access is disabled; in this case
-                * the processor would never use the TPR shadow and we
-                * could simply clear the bit from the execution
-                * control.  But such a configuration is useless, so
-                * let's keep the code simple.
                 */
                if (!is_error_page(page)) {
                        vmx->nested.virtual_apic_page = page;
                        hpa = page_to_phys(vmx->nested.virtual_apic_page);
                        vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
+               } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
+                          nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
+                          !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+                       /*
+                        * The processor will never use the TPR shadow, simply
+                        * clear the bit from the execution control.  Such a
+                        * configuration is useless, but it happens in tests.
+                        * For any other configuration, failing the vm entry is
+                        * _not_ what the processor does but it's basically the
+                        * only possibility we have.
+                        */
+                       vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
+                                       CPU_BASED_TPR_SHADOW);
+               } else {
+                       printk("bad virtual-APIC page address\n");
+                       dump_vmcs();
                }
        }
 
@@ -3768,8 +3796,18 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
        vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
 
        nested_ept_uninit_mmu_context(vcpu);
-       vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
-       __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+
+       /*
+        * This is only valid if EPT is in use, otherwise the vmcs01 GUEST_CR3
+        * points to shadow pages!  Fortunately we only get here after a WARN_ON
+        * if EPT is disabled, so a VMabort is perfectly fine.
+        */
+       if (enable_ept) {
+               vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
+               __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+       } else {
+               nested_vmx_abort(vcpu, VMX_ABORT_VMCS_CORRUPTED);
+       }
 
        /*
         * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
@@ -5385,7 +5423,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
                return ret;
 
        /* Empty 'VMXON' state is permitted */
-       if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12))
+       if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
                return 0;
 
        if (kvm_state->vmx.vmcs_pa != -1ull) {
@@ -5429,7 +5467,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
            vmcs12->vmcs_link_pointer != -1ull) {
                struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
 
-               if (kvm_state->size < sizeof(kvm_state) + 2 * sizeof(*vmcs12))
+               if (kvm_state->size < sizeof(*kvm_state) + 2 * sizeof(*vmcs12))
                        return -EINVAL;
 
                if (copy_from_user(shadow_vmcs12,
@@ -5717,6 +5755,14 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
 {
        int i;
 
+       /*
+        * Without EPT it is not possible to restore L1's CR3 and PDPTR on
+        * VMfail, because they are not available in vmcs01.  Just always
+        * use hardware checks.
+        */
+       if (!enable_ept)
+               nested_early_check = 1;
+
        if (!cpu_has_vmx_shadow_vmcs())
                enable_shadow_vmcs = 0;
        if (enable_shadow_vmcs) {
index 7b272738c5768bac029ca3e4f3e6d7b1003260da..d4cb1945b2e3b925210c4edb57c36ab683be0489 100644 (file)
@@ -3,6 +3,7 @@
 #include <asm/asm.h>
 #include <asm/bitsperlong.h>
 #include <asm/kvm_vcpu_regs.h>
+#include <asm/nospec-branch.h>
 
 #define WORD_SIZE (BITS_PER_LONG / 8)
 
@@ -77,6 +78,17 @@ ENDPROC(vmx_vmenter)
  * referred to by VMCS.HOST_RIP.
  */
 ENTRY(vmx_vmexit)
+#ifdef CONFIG_RETPOLINE
+       ALTERNATIVE "jmp .Lvmexit_skip_rsb", "", X86_FEATURE_RETPOLINE
+       /* Preserve guest's RAX, it's used to stuff the RSB. */
+       push %_ASM_AX
+
+       /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
+       FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+
+       pop %_ASM_AX
+.Lvmexit_skip_rsb:
+#endif
        ret
 ENDPROC(vmx_vmexit)
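
The block above overwrites the return stack buffer (RSB) immediately after
VM-exit so no later RET can consume a prediction planted by the guest.
The kernel does this in assembly via FILL_RETURN_BUFFER; the fragment
below is only a rough x86-64 illustration of the idea -- a chain of CALLs
whose return addresses fill the 32 RSB entries while the real stack is
rebalanced -- and omits the macro's per-loop speculation traps (it also
assumes a context where clobbering the 128-byte red zone is safe, e.g.
code built with -mno-red-zone):

    static inline void rsb_stuff(void)
    {
            int i;

            for (i = 0; i < 32; i++)        /* matches RSB_CLEAR_LOOPS */
                    asm volatile("call 1f\n\t"
                                 "pause\n\t"             /* never executed */
                                 "1: addq $8, %%rsp\n\t" /* drop return addr */
                                 ::: "memory", "cc");
    }
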
 
index c73375e01ab8c4ca52d5d87e2ca9b3648906485a..0c955bb286fffbdefa168d306341cd7a994946aa 100644 (file)
@@ -1683,12 +1683,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
                msr_info->data = to_vmx(vcpu)->spec_ctrl;
                break;
-       case MSR_IA32_ARCH_CAPABILITIES:
-               if (!msr_info->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
-                       return 1;
-               msr_info->data = to_vmx(vcpu)->arch_capabilities;
-               break;
        case MSR_IA32_SYSENTER_CS:
                msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
                break;
@@ -1895,11 +1889,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
                                              MSR_TYPE_W);
                break;
-       case MSR_IA32_ARCH_CAPABILITIES:
-               if (!msr_info->host_initiated)
-                       return 1;
-               vmx->arch_capabilities = data;
-               break;
        case MSR_IA32_CR_PAT:
                if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
                        if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
@@ -4088,8 +4077,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
                ++vmx->nmsrs;
        }
 
-       vmx->arch_capabilities = kvm_get_arch_capabilities();
-
        vm_exit_controls_init(vmx, vmx_vmexit_ctrl());
 
        /* 22.2.1, 20.8.1 */
@@ -5616,7 +5603,7 @@ static void vmx_dump_dtsel(char *name, uint32_t limit)
               vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
 }
 
-static void dump_vmcs(void)
+void dump_vmcs(void)
 {
        u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
        u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
@@ -6423,6 +6410,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
                vmx_set_interrupt_shadow(vcpu, 0);
 
+       kvm_load_guest_xcr0(vcpu);
+
        if (static_cpu_has(X86_FEATURE_PKU) &&
            kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
            vcpu->arch.pkru != vmx->host_pkru)
@@ -6473,9 +6462,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
        x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
 
-       /* Eliminate branch target predictions from guest mode */
-       vmexit_fill_RSB();
-
        /* All fields are clean at this point */
        if (static_branch_unlikely(&enable_evmcs))
                current_evmcs->hv_clean_fields |=
@@ -6519,6 +6505,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
                        __write_pkru(vmx->host_pkru);
        }
 
+       kvm_put_guest_xcr0(vcpu);
+
        vmx->nested.nested_run_pending = 0;
        vmx->idt_vectoring_info = 0;
 
@@ -6865,6 +6853,30 @@ static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
        }
 }
 
+static bool guest_cpuid_has_pmu(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *entry;
+       union cpuid10_eax eax;
+
+       entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
+       if (!entry)
+               return false;
+
+       eax.full = entry->eax;
+       return (eax.split.version_id > 0);
+}
+
+static void nested_vmx_procbased_ctls_update(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       bool pmu_enabled = guest_cpuid_has_pmu(vcpu);
+
+       if (pmu_enabled)
+               vmx->nested.msrs.procbased_ctls_high |= CPU_BASED_RDPMC_EXITING;
+       else
+               vmx->nested.msrs.procbased_ctls_high &= ~CPU_BASED_RDPMC_EXITING;
+}
+
 static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6953,6 +6965,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
        if (nested_vmx_allowed(vcpu)) {
                nested_vmx_cr_fixed1_bits_update(vcpu);
                nested_vmx_entry_exit_ctls_update(vcpu);
+               nested_vmx_procbased_ctls_update(vcpu);
        }
 
        if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
@@ -7016,6 +7029,7 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
 {
        struct vcpu_vmx *vmx;
        u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
+       struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;
 
        if (kvm_mwait_in_guest(vcpu->kvm))
                return -EOPNOTSUPP;
@@ -7024,7 +7038,8 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
        tscl = rdtsc();
        guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
        delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
-       lapic_timer_advance_cycles = nsec_to_cycles(vcpu, lapic_timer_advance_ns);
+       lapic_timer_advance_cycles = nsec_to_cycles(vcpu,
+                                                   ktimer->timer_advance_ns);
 
        if (delta_tsc > lapic_timer_advance_cycles)
                delta_tsc -= lapic_timer_advance_cycles;
@@ -7382,7 +7397,7 @@ static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
        return 0;
 }
 
-static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        int ret;
@@ -7393,9 +7408,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
        }
 
        if (vmx->nested.smm.guest_mode) {
-               vcpu->arch.hflags &= ~HF_SMM_MASK;
                ret = nested_vmx_enter_non_root_mode(vcpu, false);
-               vcpu->arch.hflags |= HF_SMM_MASK;
                if (ret)
                        return ret;
 
@@ -7409,6 +7422,11 @@ static int enable_smi_window(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+static bool vmx_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
+{
+       return false;
+}
+
 static __init int hardware_setup(void)
 {
        unsigned long host_bndcfgs;
@@ -7711,6 +7729,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .set_nested_state = NULL,
        .get_vmcs12_pages = NULL,
        .nested_enable_evmcs = NULL,
+       .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
 };
 
 static void vmx_cleanup_l1d_flush(void)
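
guest_cpuid_has_pmu() above keys off CPUID leaf 0xA: bits 7:0 of EAX hold
the architectural PMU version, and zero means no PMU, in which case the
RDPMC-exiting control is hidden from the allowed-1 bits L1 sees.  The same
check can be run against the host CPU (meaningful on Intel; uses the
compiler-provided cpuid.h):

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx))
                    return 1;

            printf("PMU version: %u\n", eax & 0xff);  /* 0 -> no PMU */
            return 0;
    }
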
index 1554cb45b3931a6de9d91b0092de1ff85d4c993f..f879529906b48cd84e99cc0f672210aaeaffeabd 100644 (file)
@@ -190,7 +190,6 @@ struct vcpu_vmx {
        u64                   msr_guest_kernel_gs_base;
 #endif
 
-       u64                   arch_capabilities;
        u64                   spec_ctrl;
 
        u32 vm_entry_controls_shadow;
@@ -518,4 +517,6 @@ static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
        vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
 }
 
+void dump_vmcs(void);
+
 #endif /* __KVM_X86_VMX_H */
index 65e4559eef2fc8589e0a4277077e766ceead3994..b5edc8e3ce1dffbd9edeb8b0025bf6277dcc8703 100644 (file)
@@ -136,10 +136,14 @@ EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio);
 static u32 __read_mostly tsc_tolerance_ppm = 250;
 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
 
-/* lapic timer advance (tscdeadline mode only) in nanoseconds */
-unsigned int __read_mostly lapic_timer_advance_ns = 1000;
+/*
+ * lapic timer advance (tscdeadline mode only) in nanoseconds.  '-1' enables
+ * adaptive tuning starting from a default advancement of 1000ns.  '0' disables
+ * advancement entirely.  Any other value is used as-is and disables adaptive
+ * tuning, i.e. allows privileged userspace to set an exact advancement time.
+ */
+static int __read_mostly lapic_timer_advance_ns = -1;
 module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
-EXPORT_SYMBOL_GPL(lapic_timer_advance_ns);
 
 static bool __read_mostly vector_hashing = true;
 module_param(vector_hashing, bool, S_IRUGO);
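
A compact restatement of the tri-state parameter documented above (the
enum and helper are illustrative, not kernel code):

    enum advance_mode {
            ADV_DISABLED,   /*  0: never busy-wait before VM entry */
            ADV_ADAPTIVE,   /* -1: start at 1000 ns, tune per vCPU */
            ADV_FIXED,      /* anything else: trusted exact value */
    };

    static enum advance_mode classify_advance_ns(int ns)
    {
            if (ns == 0)
                    return ADV_DISABLED;
            if (ns == -1)
                    return ADV_ADAPTIVE;
            return ADV_FIXED;
    }
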
@@ -800,7 +804,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
-static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
 {
        if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
                        !vcpu->guest_xcr0_loaded) {
@@ -810,8 +814,9 @@ static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
                vcpu->guest_xcr0_loaded = 1;
        }
 }
+EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0);
 
-static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
 {
        if (vcpu->guest_xcr0_loaded) {
                if (vcpu->arch.xcr0 != host_xcr0)
@@ -819,6 +824,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
                vcpu->guest_xcr0_loaded = 0;
        }
 }
+EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0);
 
 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
@@ -1125,7 +1131,7 @@ static u32 msrs_to_save[] = {
 #endif
        MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
        MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
-       MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES,
+       MSR_IA32_SPEC_CTRL,
        MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
        MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
        MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
@@ -1158,6 +1164,7 @@ static u32 emulated_msrs[] = {
 
        MSR_IA32_TSC_ADJUST,
        MSR_IA32_TSCDEADLINE,
+       MSR_IA32_ARCH_CAPABILITIES,
        MSR_IA32_MISC_ENABLE,
        MSR_IA32_MCG_STATUS,
        MSR_IA32_MCG_CTL,
@@ -2443,6 +2450,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (msr_info->host_initiated)
                        vcpu->arch.microcode_version = data;
                break;
+       case MSR_IA32_ARCH_CAPABILITIES:
+               if (!msr_info->host_initiated)
+                       return 1;
+               vcpu->arch.arch_capabilities = data;
+               break;
        case MSR_EFER:
                return set_efer(vcpu, data);
        case MSR_K7_HWCR:
@@ -2747,6 +2759,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_IA32_UCODE_REV:
                msr_info->data = vcpu->arch.microcode_version;
                break;
+       case MSR_IA32_ARCH_CAPABILITIES:
+               if (!msr_info->host_initiated &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
+                       return 1;
+               msr_info->data = vcpu->arch.arch_capabilities;
+               break;
        case MSR_IA32_TSC:
                msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
                break;
@@ -3081,7 +3099,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                break;
        case KVM_CAP_NESTED_STATE:
                r = kvm_x86_ops->get_nested_state ?
-                       kvm_x86_ops->get_nested_state(NULL, 0, 0) : 0;
+                       kvm_x86_ops->get_nested_state(NULL, NULL, 0) : 0;
                break;
        default:
                break;
@@ -3516,7 +3534,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
        memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
-static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);
+static void kvm_smm_changed(struct kvm_vcpu *vcpu);
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                                              struct kvm_vcpu_events *events)
@@ -3576,12 +3594,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                vcpu->arch.apic->sipi_vector = events->sipi_vector;
 
        if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
-               u32 hflags = vcpu->arch.hflags;
-               if (events->smi.smm)
-                       hflags |= HF_SMM_MASK;
-               else
-                       hflags &= ~HF_SMM_MASK;
-               kvm_set_hflags(vcpu, hflags);
+               if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
+                       if (events->smi.smm)
+                               vcpu->arch.hflags |= HF_SMM_MASK;
+                       else
+                               vcpu->arch.hflags &= ~HF_SMM_MASK;
+                       kvm_smm_changed(vcpu);
+               }
 
                vcpu->arch.smi_pending = events->smi.pending;
 
@@ -4258,7 +4277,7 @@ static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
 }
 
 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
-                                         u32 kvm_nr_mmu_pages)
+                                        unsigned long kvm_nr_mmu_pages)
 {
        if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
                return -EINVAL;
@@ -4272,7 +4291,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
        return 0;
 }
 
-static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
+static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
 {
        return kvm->arch.n_max_mmu_pages;
 }
@@ -5946,12 +5965,18 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
 
 static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
 {
-       kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
+       emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
 }
 
-static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
+static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
+                                 const char *smstate)
 {
-       return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smbase);
+       return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smstate);
+}
+
+static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt)
+{
+       kvm_smm_changed(emul_to_vcpu(ctxt));
 }
 
 static const struct x86_emulate_ops emulate_ops = {
@@ -5994,6 +6019,7 @@ static const struct x86_emulate_ops emulate_ops = {
        .get_hflags          = emulator_get_hflags,
        .set_hflags          = emulator_set_hflags,
        .pre_leave_smm       = emulator_pre_leave_smm,
+       .post_leave_smm      = emulator_post_leave_smm,
 };
 
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -6235,16 +6261,6 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu)
        kvm_mmu_reset_context(vcpu);
 }
 
-static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
-{
-       unsigned changed = vcpu->arch.hflags ^ emul_flags;
-
-       vcpu->arch.hflags = emul_flags;
-
-       if (changed & HF_SMM_MASK)
-               kvm_smm_changed(vcpu);
-}
-
 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
                                unsigned long *db)
 {
@@ -6523,15 +6539,45 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
 
+static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.pio.count = 0;
+       return 1;
+}
+
+static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.pio.count = 0;
+
+       if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip)))
+               return 1;
+
+       return kvm_skip_emulated_instruction(vcpu);
+}
+
 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
                            unsigned short port)
 {
        unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
        int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
                                            size, port, &val, 1);
-       /* do not return to emulator after return from userspace */
-       vcpu->arch.pio.count = 0;
-       return ret;
+       if (ret)
+               return ret;
+
+       /*
+        * Work around userspace that relies on the old KVM behavior of %rip
+        * being incremented prior to exiting to userspace to handle "OUT 0x7e".
+        */
+       if (port == 0x7e &&
+           kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) {
+               vcpu->arch.complete_userspace_io =
+                       complete_fast_pio_out_port_0x7e;
+               kvm_skip_emulated_instruction(vcpu);
+       } else {
+               vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
+               vcpu->arch.complete_userspace_io = complete_fast_pio_out;
+       }
+       return 0;
 }
 
 static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
@@ -6541,6 +6587,11 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
        /* We should only ever be called with arch.pio.count equal to 1 */
        BUG_ON(vcpu->arch.pio.count != 1);
 
+       if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) {
+               vcpu->arch.pio.count = 0;
+               return 1;
+       }
+
        /* For size less than 4 we merge, else we zero extend */
        val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
                                        : 0;
@@ -6553,7 +6604,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
                                 vcpu->arch.pio.port, &val, 1);
        kvm_register_write(vcpu, VCPU_REGS_RAX, val);
 
-       return 1;
+       return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
@@ -6572,6 +6623,7 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
                return ret;
        }
 
+       vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
        vcpu->arch.complete_userspace_io = complete_fast_pio_in;
 
        return 0;
@@ -6579,16 +6631,13 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
 
 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
 {
-       int ret = kvm_skip_emulated_instruction(vcpu);
+       int ret;
 
-       /*
-        * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
-        * KVM_EXIT_DEBUG here.
-        */
        if (in)
-               return kvm_fast_pio_in(vcpu, size, port) && ret;
+               ret = kvm_fast_pio_in(vcpu, size, port);
        else
-               return kvm_fast_pio_out(vcpu, size, port) && ret;
+               ret = kvm_fast_pio_out(vcpu, size, port);
+       return ret && kvm_skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_fast_pio);
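
The restructuring above changes when the I/O instruction gets skipped:
previously kvm_fast_pio() advanced %rip before exiting to userspace, which
could swallow a KVM_GUESTDBG_SINGLESTEP-triggered KVM_EXIT_DEBUG.  Now the
skip happens either immediately (PIO fully handled in kernel) or in a
completion callback that first verifies the guest still sits at the
recorded linear %rip.  A miniature of that deferred-completion pattern
(types and helpers are stand-ins, not KVM's):

    #include <stdbool.h>

    struct mini_vcpu {
            unsigned long rip;              /* current guest rip */
            unsigned long pio_linear_rip;   /* rip recorded at the exit */
            bool (*complete_userspace_io)(struct mini_vcpu *);
    };

    static bool complete_pio(struct mini_vcpu *v)
    {
            /* If userspace changed the guest's rip, don't skip. */
            if (v->rip != v->pio_linear_rip)
                    return true;
            v->rip += 1;    /* stand-in for kvm_skip_emulated_instruction() */
            return true;
    }

    static void start_pio_exit(struct mini_vcpu *v)
    {
            v->pio_linear_rip = v->rip;
            v->complete_userspace_io = complete_pio;
            /* ...exit to userspace; callback runs on the next KVM_RUN... */
    }
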
 
@@ -7413,9 +7462,9 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
        put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
 }
 
+#ifdef CONFIG_X86_64
 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 {
-#ifdef CONFIG_X86_64
        struct desc_ptr dt;
        struct kvm_segment seg;
        unsigned long val;
@@ -7465,10 +7514,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 
        for (i = 0; i < 6; i++)
                enter_smm_save_seg_64(vcpu, buf, i);
-#else
-       WARN_ON_ONCE(1);
-#endif
 }
+#endif
 
 static void enter_smm(struct kvm_vcpu *vcpu)
 {
@@ -7479,9 +7526,11 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 
        trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
        memset(buf, 0, 512);
+#ifdef CONFIG_X86_64
        if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
                enter_smm_save_state_64(vcpu, buf);
        else
+#endif
                enter_smm_save_state_32(vcpu, buf);
 
        /*
@@ -7539,8 +7588,10 @@ static void enter_smm(struct kvm_vcpu *vcpu)
        kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
        kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
 
+#ifdef CONFIG_X86_64
        if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
                kvm_x86_ops->set_efer(vcpu, 0);
+#endif
 
        kvm_update_cpuid(vcpu);
        kvm_mmu_reset_context(vcpu);
@@ -7837,15 +7888,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                goto cancel_injection;
        }
 
-       kvm_load_guest_xcr0(vcpu);
-
        if (req_immediate_exit) {
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_x86_ops->request_immediate_exit(vcpu);
        }
 
        trace_kvm_entry(vcpu->vcpu_id);
-       if (lapic_timer_advance_ns)
+       if (lapic_in_kernel(vcpu) &&
+           vcpu->arch.apic->lapic_timer.timer_advance_ns)
                wait_lapic_expire(vcpu);
        guest_enter_irqoff();
 
@@ -7891,8 +7941,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        vcpu->mode = OUTSIDE_GUEST_MODE;
        smp_wmb();
 
-       kvm_put_guest_xcr0(vcpu);
-
        kvm_before_interrupt(vcpu);
        kvm_x86_ops->handle_external_intr(vcpu);
        kvm_after_interrupt(vcpu);
@@ -8733,6 +8781,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
+       vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
        vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
        kvm_vcpu_mtrr_init(vcpu);
        vcpu_load(vcpu);
@@ -9034,7 +9083,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
        if (irqchip_in_kernel(vcpu->kvm)) {
                vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu);
-               r = kvm_create_lapic(vcpu);
+               r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
                if (r < 0)
                        goto fail_mmu_destroy;
        } else
@@ -9429,13 +9478,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
                                const struct kvm_memory_slot *new,
                                enum kvm_mr_change change)
 {
-       int nr_mmu_pages = 0;
-
        if (!kvm->arch.n_requested_mmu_pages)
-               nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
-
-       if (nr_mmu_pages)
-               kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
+               kvm_mmu_change_mmu_pages(kvm,
+                               kvm_mmu_calculate_default_mmu_pages(kvm));
 
        /*
         * Dirty logging tracks sptes in 4k granularity, meaning that large
index 28406aa1136d7eb772ed712f9df34ffe14290e66..534d3f28bb01a9a302d0b40d6fe6fc5483a5d97b 100644 (file)
@@ -294,8 +294,6 @@ extern u64 kvm_supported_xcr0(void);
 
 extern unsigned int min_timer_period_us;
 
-extern unsigned int lapic_timer_advance_ns;
-
 extern bool enable_vmware_backdoor;
 
 extern struct static_key kvm_no_apic_vcpu;
@@ -347,4 +345,6 @@ static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
        __this_cpu_write(current_vcpu, NULL);
 }
 
+void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
+void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
 #endif
index 140e61843a079e3da471783455414c6480574ac8..5246db42de4576e7f0bbbf5899bed8ab6f06cb74 100644 (file)
@@ -6,6 +6,18 @@
 # Produces uninteresting flaky coverage.
 KCOV_INSTRUMENT_delay.o        := n
 
+# Early boot use of cmdline; don't instrument it
+ifdef CONFIG_AMD_MEM_ENCRYPT
+KCOV_INSTRUMENT_cmdline.o := n
+KASAN_SANITIZE_cmdline.o  := n
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_cmdline.o = -pg
+endif
+
+CFLAGS_cmdline.o := $(call cc-option, -fno-stack-protector)
+endif
+
 inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
 inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt
 quiet_cmd_inat_tables = GEN     $@
@@ -23,7 +35,6 @@ obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
 lib-y := delay.o misc.o cmdline.o cpu.o
 lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
 lib-y += memcpy_$(BITS).o
-lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
 lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
 lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
index db4e5aa0858b963cd1b2320e4b2725ca391c930c..b2f1822084aee637a14a5b4f950c3643cb6bad23 100644 (file)
 #include <asm/smap.h>
 #include <asm/export.h>
 
+.macro ALIGN_DESTINATION
+       /* check for bad alignment of destination */
+       movl %edi,%ecx
+       andl $7,%ecx
+       jz 102f                         /* already aligned */
+       subl $8,%ecx
+       negl %ecx
+       subl %ecx,%edx
+100:   movb (%rsi),%al
+101:   movb %al,(%rdi)
+       incq %rsi
+       incq %rdi
+       decl %ecx
+       jnz 100b
+102:
+       .section .fixup,"ax"
+103:   addl %ecx,%edx                  /* ecx is zerorest also */
+       jmp copy_user_handle_tail
+       .previous
+
+       _ASM_EXTABLE_UA(100b, 103b)
+       _ASM_EXTABLE_UA(101b, 103b)
+       .endm
+
 /*
  * copy_user_generic_unrolled - memory copy with exception handling.
  * This version is for CPUs like P4 that don't have efficient micro
@@ -193,6 +217,30 @@ ENTRY(copy_user_enhanced_fast_string)
 ENDPROC(copy_user_enhanced_fast_string)
 EXPORT_SYMBOL(copy_user_enhanced_fast_string)
 
+/*
+ * Try to copy last bytes and clear the rest if needed.
+ * Since protection fault in copy_from/to_user is not a normal situation,
+ * it is not necessary to optimize tail handling.
+ *
+ * Input:
+ * rdi destination
+ * rsi source
+ * rdx count
+ *
+ * Output:
+ * eax uncopied bytes or 0 if successful.
+ */
+ALIGN;
+copy_user_handle_tail:
+       movl %edx,%ecx
+1:     rep movsb
+2:     mov %ecx,%eax
+       ASM_CLAC
+       ret
+
+       _ASM_EXTABLE_UA(1b, 2b)
+ENDPROC(copy_user_handle_tail)
+
 /*
  * copy_user_nocache - Uncached memory copy with exception handling
  * This will force destination out of cache for more performance.
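
The routine above replaces the C copy_user_handle_tail() that this series
removes from usercopy_64.c (see the hunk further down): copy what you can,
byte by byte, and report in eax how many bytes were left uncopied.  In C
pseudocode, with a hypothetical faultable byte copy standing in for the
movsb-with-fixup:

    /* copy_one_byte() is hypothetical: nonzero on fault, like a paired
     * single-byte __get_user()/__put_user(). */
    extern int copy_one_byte(char *dst, const char *src);

    static unsigned long handle_tail(char *to, const char *from,
                                     unsigned int len)
    {
            unsigned int i;

            for (i = 0; i < len; i++)
                    if (copy_one_byte(to + i, from + i))
                            break;

            return len - i;         /* bytes NOT copied */
    }
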
index 9baca3e054bef32acd14a538d14289adee71fee0..e7925d668b680269fb2442766deaf416dc42f9a1 100644 (file)
@@ -94,7 +94,7 @@ static unsigned do_csum(const unsigned char *buff, unsigned len)
                                    : "m" (*(unsigned long *)buff), 
                                    "r" (zero),  "0" (result));
                                --count; 
-                                       buff += 8;
+                               buff += 8;
                        }
                        result = add32_with_carry(result>>32,
                                                  result&0xffffffff); 
index 3b24dc05251c7ce908cc2be48befb971b5b8f564..9d05572370edc40f234f2813f5fc1c82020ad94f 100644 (file)
@@ -257,6 +257,7 @@ ENTRY(__memcpy_mcsafe)
        /* Copy successful. Return zero */
 .L_done_memcpy_trap:
        xorl %eax, %eax
+.L_done:
        ret
 ENDPROC(__memcpy_mcsafe)
 EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
@@ -273,7 +274,7 @@ EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
        addl    %edx, %ecx
 .E_trailing_bytes:
        mov     %ecx, %eax
-       ret
+       jmp     .L_done
 
        /*
         * For write fault handling, given the destination is unaligned,
diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
deleted file mode 100644 (file)
index dc2ab6e..0000000
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * x86 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License
- *     as published by the Free Software Foundation; either version
- *     2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
- */
-
-#include <linux/linkage.h>
-#include <asm/alternative-asm.h>
-#include <asm/frame.h>
-
-#define __ASM_HALF_REG(reg)    __ASM_SEL(reg, e##reg)
-#define __ASM_HALF_SIZE(inst)  __ASM_SEL(inst##w, inst##l)
-
-#ifdef CONFIG_X86_32
-
-/*
- * The semaphore operations have a special calling sequence that
- * allow us to do a simpler in-line version of them. These routines
- * need to convert that sequence back into the C sequence when
- * there is contention on the semaphore.
- *
- * %eax contains the semaphore pointer on entry. Save the C-clobbered
- * registers (%eax, %edx and %ecx) except %eax which is either a return
- * value or just gets clobbered. Same is true for %edx so make sure GCC
- * reloads it after the slow path, by making it hold a temporary, for
- * example see ____down_write().
- */
-
-#define save_common_regs \
-       pushl %ecx
-
-#define restore_common_regs \
-       popl %ecx
-
-       /* Avoid uglifying the argument copying x86-64 needs to do. */
-       .macro movq src, dst
-       .endm
-
-#else
-
-/*
- * x86-64 rwsem wrappers
- *
- * This interfaces the inline asm code to the slow-path
- * C routines. We need to save the call-clobbered regs
- * that the asm does not mark as clobbered, and move the
- * argument from %rax to %rdi.
- *
- * NOTE! We don't need to save %rax, because the functions
- * will always return the semaphore pointer in %rax (which
- * is also the input argument to these helpers)
- *
- * The following can clobber %rdx because the asm clobbers it:
- *   call_rwsem_down_write_failed
- *   call_rwsem_wake
- * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
- */
-
-#define save_common_regs \
-       pushq %rdi; \
-       pushq %rsi; \
-       pushq %rcx; \
-       pushq %r8;  \
-       pushq %r9;  \
-       pushq %r10; \
-       pushq %r11
-
-#define restore_common_regs \
-       popq %r11; \
-       popq %r10; \
-       popq %r9; \
-       popq %r8; \
-       popq %rcx; \
-       popq %rsi; \
-       popq %rdi
-
-#endif
-
-/* Fix up special calling conventions */
-ENTRY(call_rwsem_down_read_failed)
-       FRAME_BEGIN
-       save_common_regs
-       __ASM_SIZE(push,) %__ASM_REG(dx)
-       movq %rax,%rdi
-       call rwsem_down_read_failed
-       __ASM_SIZE(pop,) %__ASM_REG(dx)
-       restore_common_regs
-       FRAME_END
-       ret
-ENDPROC(call_rwsem_down_read_failed)
-
-ENTRY(call_rwsem_down_read_failed_killable)
-       FRAME_BEGIN
-       save_common_regs
-       __ASM_SIZE(push,) %__ASM_REG(dx)
-       movq %rax,%rdi
-       call rwsem_down_read_failed_killable
-       __ASM_SIZE(pop,) %__ASM_REG(dx)
-       restore_common_regs
-       FRAME_END
-       ret
-ENDPROC(call_rwsem_down_read_failed_killable)
-
-ENTRY(call_rwsem_down_write_failed)
-       FRAME_BEGIN
-       save_common_regs
-       movq %rax,%rdi
-       call rwsem_down_write_failed
-       restore_common_regs
-       FRAME_END
-       ret
-ENDPROC(call_rwsem_down_write_failed)
-
-ENTRY(call_rwsem_down_write_failed_killable)
-       FRAME_BEGIN
-       save_common_regs
-       movq %rax,%rdi
-       call rwsem_down_write_failed_killable
-       restore_common_regs
-       FRAME_END
-       ret
-ENDPROC(call_rwsem_down_write_failed_killable)
-
-ENTRY(call_rwsem_wake)
-       FRAME_BEGIN
-       /* do nothing if still outstanding active readers */
-       __ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
-       jnz 1f
-       save_common_regs
-       movq %rax,%rdi
-       call rwsem_wake
-       restore_common_regs
-1:     FRAME_END
-       ret
-ENDPROC(call_rwsem_wake)
-
-ENTRY(call_rwsem_downgrade_wake)
-       FRAME_BEGIN
-       save_common_regs
-       __ASM_SIZE(push,) %__ASM_REG(dx)
-       movq %rax,%rdi
-       call rwsem_downgrade_wake
-       __ASM_SIZE(pop,) %__ASM_REG(dx)
-       restore_common_regs
-       FRAME_END
-       ret
-ENDPROC(call_rwsem_downgrade_wake)
index ee42bb0cbeb3f66d1e17fdec0c4994749fc55828..9952a01cad24969c8481f15e70eac338058e9ad4 100644 (file)
@@ -54,26 +54,6 @@ unsigned long clear_user(void __user *to, unsigned long n)
 }
 EXPORT_SYMBOL(clear_user);
 
-/*
- * Try to copy last bytes and clear the rest if needed.
- * Since protection fault in copy_from/to_user is not a normal situation,
- * it is not necessary to optimize tail handling.
- */
-__visible unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len)
-{
-       for (; len; --len, to++) {
-               char c;
-
-               if (__get_user_nocheck(c, from++, sizeof(char)))
-                       break;
-               if (__put_user_nocheck(c, to, sizeof(char)))
-                       break;
-       }
-       clac();
-       return len;
-}
-
 /*
  * Similar to copy_user_handle_tail, probe for the write fault point,
  * but reuse __memcpy_mcsafe in case a new read error is encountered.
index ee8f8ab469417c6eb0f06aa6d4390a0eec8162b1..c0309ea9abee4201b1f697e9e61e20296f359736 100644 (file)
@@ -259,7 +259,8 @@ static void note_wx(struct pg_state *st)
 #endif
        /* Account the WX pages */
        st->wx_pages += npages;
-       WARN_ONCE(1, "x86/mm: Found insecure W+X mapping at address %pS\n",
+       WARN_ONCE(__supported_pte_mask & _PAGE_NX,
+                 "x86/mm: Found insecure W+X mapping at address %pS\n",
                  (void *)st->start_address);
 }
 
index f905a2371080beee339dac3f5dad13d0ab1e7ef8..8dacdb96899ec5a76749751d2675b5b827855141 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/memblock.h>
 #include <linux/swapfile.h>
 #include <linux/swapops.h>
+#include <linux/kmemleak.h>
 
 #include <asm/set_memory.h>
 #include <asm/e820/api.h>
@@ -766,6 +767,11 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
        if (debug_pagealloc_enabled()) {
                pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
                        begin, end - 1);
+               /*
+                * Inform kmemleak about the hole in the memory since the
+                * corresponding pages will be unmapped.
+                */
+               kmemleak_free_part((void *)begin, end - begin);
                set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
        } else {
                /*
index 0029604af8a411397c019f066fae8dee7df8c805..dd73d5d74393f7c987e9c4c18fde1f698d9213ae 100644 (file)
@@ -825,7 +825,7 @@ void __init __early_set_fixmap(enum fixed_addresses idx,
        pte = early_ioremap_pte(addr);
 
        /* Sanitize 'prot' against any unsupported bits: */
-       pgprot_val(flags) &= __default_kernel_pte_mask;
+       pgprot_val(flags) &= __supported_pte_mask;
 
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
index 3f452ffed7e93f377aa1ae38150a1f2cf7e91a5c..d669c5e797e06e27a891f099739deb21e165673b 100644 (file)
@@ -94,7 +94,7 @@ void __init kernel_randomize_memory(void)
        if (!kaslr_memory_enabled())
                return;
 
-       kaslr_regions[0].size_tb = 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT);
+       kaslr_regions[0].size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT);
        kaslr_regions[1].size_tb = VMALLOC_SIZE_TB;
 
        /*
index db316571452145f50832ba56ff7fb49214d4ae02..dc726e07d8ba84a6ac790c8b0929e9235e25c2fc 100644 (file)
@@ -230,7 +230,7 @@ bool mmap_address_hint_valid(unsigned long addr, unsigned long len)
 /* Can we access it for direct reading/writing? Must be RAM: */
 int valid_phys_addr_range(phys_addr_t addr, size_t count)
 {
-       return addr + count <= __pa(high_memory);
+       return addr + count - 1 <= __pa(high_memory - 1);
 }
 
 /* Can we access it through mmap? Must be a valid physical address: */
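
The one-line change above is an overflow fix: when addr sits near the top
of the physical address space, addr + count can wrap and still satisfy the
old '<=' test.  Comparing the last byte of the request against the last
valid byte avoids the wrap (for count >= 1).  As a standalone predicate:

    #include <stdbool.h>
    #include <stdint.h>

    static bool phys_range_valid(uint64_t addr, uint64_t count, uint64_t limit)
    {
            /* old, wrap-prone form: addr + count <= limit */
            return addr + count - 1 <= limit - 1;   /* requires count >= 1 */
    }
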
index 4fee5c3003ed78ab9566fe92f09e0d14968c9865..d0255d64edced4f887efe21799b0f527c063aeea 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/spinlock.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
+#include <linux/cpu.h>
 
 #include <asm/cpufeature.h>
 #include <asm/hypervisor.h>
@@ -77,7 +78,7 @@ static void __init pti_print_if_secure(const char *reason)
                pr_info("%s\n", reason);
 }
 
-enum pti_mode {
+static enum pti_mode {
        PTI_AUTO = 0,
        PTI_FORCE_OFF,
        PTI_FORCE_ON
@@ -115,7 +116,8 @@ void __init pti_check_boottime_disable(void)
                }
        }
 
-       if (cmdline_find_option_bool(boot_command_line, "nopti")) {
+       if (cmdline_find_option_bool(boot_command_line, "nopti") ||
+           cpu_mitigations_off()) {
                pti_mode = PTI_FORCE_OFF;
                pti_print_if_insecure("disabled on command line.");
                return;
@@ -602,7 +604,7 @@ static void pti_clone_kernel_text(void)
        set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
 }
 
-void pti_set_kernel_image_nonglobal(void)
+static void pti_set_kernel_image_nonglobal(void)
 {
        /*
         * The identity map is created with PMDs, regardless of the
index bc4bc7b2f075d3f302ba25dc261b759ab89dab97..487b8474c01cde006241a4c9a732bfe6aae53ff6 100644 (file)
@@ -728,7 +728,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 {
        int cpu;
 
-       struct flush_tlb_info info __aligned(SMP_CACHE_BYTES) = {
+       struct flush_tlb_info info = {
                .mm = mm,
                .stride_shift = stride_shift,
                .freed_tables = freed_tables,
index 458a0e2bcc57ca42a10964c5b62bc8bd11ca96ea..a25a9fd987a9e5dc2ba75d7b3c91c098ad7d7034 100644 (file)
@@ -449,7 +449,7 @@ void __init efi_free_boot_services(void)
                 */
                rm_size = real_mode_size_needed();
                if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) {
-                       set_real_mode_mem(start, rm_size);
+                       set_real_mode_mem(start);
                        start += rm_size;
                        size -= rm_size;
                }
index d10105825d57a7faee5f3221d04f0e77b9807e45..7dce39c8c034a8a9f2481aff02590a793ff86498 100644 (file)
@@ -15,15 +15,6 @@ u32 *trampoline_cr4_features;
 /* Hold the pgd entry used on booting additional CPUs */
 pgd_t trampoline_pgd_entry;
 
-void __init set_real_mode_mem(phys_addr_t mem, size_t size)
-{
-       void *base = __va(mem);
-
-       real_mode_header = (struct real_mode_header *) base;
-       printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
-              base, (unsigned long long)mem, size);
-}
-
 void __init reserve_real_mode(void)
 {
        phys_addr_t mem;
@@ -42,7 +33,7 @@ void __init reserve_real_mode(void)
        }
 
        memblock_reserve(mem, size);
-       set_real_mode_mem(mem, size);
+       set_real_mode_mem(mem);
 }
 
 static void __init setup_real_mode(void)
index a9e80e44178c7818cdf03ae07fb66128c028f003..a8985e1f7432f394ca1aac200ba23e8c416dd1cc 100644 (file)
@@ -32,12 +32,6 @@ config ARCH_DEFCONFIG
        default "arch/um/configs/i386_defconfig" if X86_32
        default "arch/um/configs/x86_64_defconfig" if X86_64
 
-config RWSEM_XCHGADD_ALGORITHM
-       def_bool 64BIT
-
-config RWSEM_GENERIC_SPINLOCK
-       def_bool !RWSEM_XCHGADD_ALGORITHM
-
 config 3_LEVEL_PGTABLES
        bool "Three-level pagetables" if !64BIT
        default 64BIT
index 2d686ae54681d5a8991d22b69b1673f70e8522d1..33c51c064c77e83242e6dc62e3254af438b626d4 100644 (file)
@@ -21,14 +21,12 @@ obj-y += checksum_32.o syscalls_32.o
 obj-$(CONFIG_ELF_CORE) += elfcore.o
 
 subarch-y = ../lib/string_32.o ../lib/atomic64_32.o ../lib/atomic64_cx8_32.o
-subarch-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += ../lib/rwsem.o
 
 else
 
 obj-y += syscalls_64.o vdso/
 
-subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o \
-               ../lib/rwsem.o
+subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o
 
 endif
 
index 4b9aafe766c58870d285975bdb3339b70628c827..35c8d91e61069df2101eeea4c01eb41648aa91cd 100644 (file)
@@ -46,9 +46,6 @@ config XTENSA
          with reasonable minimum requirements.  The Xtensa Linux project has
          a home page at <http://www.linux-xtensa.org/>.
 
-config RWSEM_XCHGADD_ALGORITHM
-       def_bool y
-
 config GENERIC_HWEIGHT
        def_bool y
 
index 42b6cb3d16f7b9666baf44daf0a63757786d729b..4148090cafb00a05a6fe5c7158190eba803a79db 100644 (file)
@@ -15,6 +15,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
@@ -24,7 +25,6 @@ generic-y += percpu.h
 generic-y += preempt.h
 generic-y += qrwlock.h
 generic-y += qspinlock.h
-generic-y += rwsem.h
 generic-y += sections.h
 generic-y += socket.h
 generic-y += topology.h
index f7dd895b2353e0510a7199896030c8527695c130..0c14018d1c2601a63a92b2f29be1270d9919220c 100644 (file)
@@ -187,15 +187,18 @@ struct thread_struct {
 
 /* Clearing a0 terminates the backtrace. */
 #define start_thread(regs, new_pc, new_sp) \
-       memset(regs, 0, sizeof(*regs)); \
-       regs->pc = new_pc; \
-       regs->ps = USER_PS_VALUE; \
-       regs->areg[1] = new_sp; \
-       regs->areg[0] = 0; \
-       regs->wmask = 1; \
-       regs->depc = 0; \
-       regs->windowbase = 0; \
-       regs->windowstart = 1;
+       do { \
+               memset((regs), 0, sizeof(*(regs))); \
+               (regs)->pc = (new_pc); \
+               (regs)->ps = USER_PS_VALUE; \
+               (regs)->areg[1] = (new_sp); \
+               (regs)->areg[0] = 0; \
+               (regs)->wmask = 1; \
+               (regs)->depc = 0; \
+               (regs)->windowbase = 0; \
+               (regs)->windowstart = 1; \
+               (regs)->syscall = NO_SYSCALL; \
+       } while (0)
 
 /* Forward declaration */
 struct task_struct;
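
[The start_thread() rewrite above applies the do { ... } while (0) idiom: a multi-statement macro wrapped this way expands to a single statement, so it composes safely with unbraced if/else, and the added parentheses around regs, new_pc and new_sp protect argument expressions. A minimal userspace illustration of what goes wrong without the wrapper — illustrative only, not kernel code:]

    #include <stdio.h>

    /* Unsafe: two statements; an unbraced if only guards the first. */
    #define RESET_BAD(x, y)   (x) = 0; (y) = 0

    /* Safe: do { } while (0) turns the body into one statement that
     * still takes a trailing semicolon, like a function call. */
    #define RESET_GOOD(x, y)  do { (x) = 0; (y) = 0; } while (0)

    int main(void)
    {
            int a = 1, b = 1;

            if (0)
                    RESET_BAD(a, b);     /* (b) = 0 runs unconditionally */
            printf("bad:  a=%d b=%d\n", a, b);   /* a=1 b=0 */

            a = 1; b = 1;
            if (0)
                    RESET_GOOD(a, b);    /* nothing runs */
            printf("good: a=%d b=%d\n", a, b);   /* a=1 b=1 */
            return 0;
    }
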
index a168bf81c7f4701a036abaa251fa5dce7fad6c19..91dc06d580603bfd8025eeda2acaf09bd2260378 100644 (file)
@@ -59,45 +59,24 @@ static inline void syscall_set_return_value(struct task_struct *task,
 
 static inline void syscall_get_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
        static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS;
-       unsigned int j;
+       unsigned int i;
 
-       if (n == 0)
-               return;
-
-       WARN_ON_ONCE(i + n > SYSCALL_MAX_ARGS);
-
-       for (j = 0; j < n; ++j) {
-               if (i + j < SYSCALL_MAX_ARGS)
-                       args[j] = regs->areg[reg[i + j]];
-               else
-                       args[j] = 0;
-       }
+       for (i = 0; i < 6; ++i)
+               args[i] = regs->areg[reg[i]];
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
                                         struct pt_regs *regs,
-                                        unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
        static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS;
-       unsigned int j;
-
-       if (n == 0)
-               return;
-
-       if (WARN_ON_ONCE(i + n > SYSCALL_MAX_ARGS)) {
-               if (i < SYSCALL_MAX_ARGS)
-                       n = SYSCALL_MAX_ARGS - i;
-               else
-                       return;
-       }
+       unsigned int i;
 
-       for (j = 0; j < n; ++j)
-               regs->areg[reg[i + j]] = args[j];
+       for (i = 0; i < 6; ++i)
+               regs->areg[reg[i]] = args[i];
 }
 
 asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
index 0d766f9c1083a59cd4a073cb5da0dfc640a06415..50889935138ad2170b1e4c57aaccbf9551fdf997 100644 (file)
 #include <asm/cache.h>
 #include <asm/page.h>
 
-#if (DCACHE_WAY_SIZE <= PAGE_SIZE)
-
-/* Note, read http://lkml.org/lkml/2004/1/15/6 */
-
-# define tlb_start_vma(tlb,vma)                        do { } while (0)
-# define tlb_end_vma(tlb,vma)                  do { } while (0)
-
-#else
-
-# define tlb_start_vma(tlb, vma)                                             \
-       do {                                                                  \
-               if (!tlb->fullmm)                                             \
-                       flush_cache_range(vma, vma->vm_start, vma->vm_end);   \
-       } while(0)
-
-# define tlb_end_vma(tlb, vma)                                               \
-       do {                                                                  \
-               if (!tlb->fullmm)                                             \
-                       flush_tlb_range(vma, vma->vm_start, vma->vm_end);     \
-       } while(0)
-
-#endif
-
-#define __tlb_remove_tlb_entry(tlb,pte,addr)   do { } while (0)
-#define tlb_flush(tlb)                         flush_tlb_mm((tlb)->mm)
-
 #include <asm-generic/tlb.h>
 
 #define __pte_free_tlb(tlb, pte, address)      pte_free((tlb)->mm, pte)
index 8a7ad40be463656854310b85dbba06d5f9b8e189..7417847dc438e5ff6aff14f04094a1323d6b933f 100644 (file)
@@ -1,2 +1 @@
 generated-y += unistd_32.h
-generic-y += kvm_para.h
index e50f5124dc6f789c7457cb3b296d67f2b5cc76e1..e54af8b7e0f8c314830ae1ae5244f86af19a1682 100644 (file)
@@ -1860,6 +1860,8 @@ ENTRY(system_call)
        l32i    a7, a2, PT_SYSCALL
 
 1:
+       s32i    a7, a1, 4
+
        /* syscall = sys_call_table[syscall_nr] */
 
        movi    a4, sys_call_table
@@ -1893,8 +1895,12 @@ ENTRY(system_call)
        retw
 
 1:
+       l32i    a4, a1, 4
+       l32i    a3, a2, PT_SYSCALL
+       s32i    a4, a2, PT_SYSCALL
        mov     a6, a2
        call4   do_syscall_trace_leave
+       s32i    a3, a2, PT_SYSCALL
        retw
 
 ENDPROC(system_call)
index 174c11f13bba375472f77a02eca75b1408d5e2de..b9f82510c65019506ffb98f3f23ac494f7285efa 100644 (file)
@@ -253,10 +253,14 @@ static int return_address_cb(struct stackframe *frame, void *data)
        return 1;
 }
 
+/*
+ * level == 0 is for the return address from the caller of this function,
+ * not from this function itself.
+ */
 unsigned long return_address(unsigned level)
 {
        struct return_addr_data r = {
-               .skip = level + 1,
+               .skip = level,
        };
        walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
        return r.addr;
index 6af49929de857b24f9ddb134ef79efd0f7c08e72..30084eaf84227ac89eb6e5baa0aed77d6fad5f50 100644 (file)
 421    common  rt_sigtimedwait_time64          sys_rt_sigtimedwait
 422    common  futex_time64                    sys_futex
 423    common  sched_rr_get_interval_time64    sys_sched_rr_get_interval
+424    common  pidfd_send_signal               sys_pidfd_send_signal
+425    common  io_uring_setup                  sys_io_uring_setup
+426    common  io_uring_enter                  sys_io_uring_enter
+427    common  io_uring_register               sys_io_uring_register
index 2fb7d117222840da05f44cf7eed39348d27502e5..03678c4afc39b9e4ee94a3666b7ef630ad246d05 100644 (file)
@@ -33,7 +33,7 @@ static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
 
        pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);
        if (!pte)
-               panic("%s: Failed to allocate %zu bytes align=%lx\n",
+               panic("%s: Failed to allocate %lu bytes align=%lx\n",
                      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
 
        for (i = 0; i < n_pages; ++i)
index 4c592496a16a21655dcd8616ae4b3181111b3956..5ba1e0d841b4d552e3858ad30888e489669772bd 100644 (file)
@@ -674,7 +674,7 @@ static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
         * at least two nodes.
         */
        return !(varied_queue_weights || multiple_classes_busy
-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
               || bfqd->num_groups_with_pending_reqs > 0
 #endif
                );
@@ -2822,7 +2822,7 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
        bfq_remove_request(q, rq);
 }
 
-static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 {
        /*
         * If this bfqq is shared between multiple processes, check
@@ -2855,9 +2855,11 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
        /*
         * All in-service entities must have been properly deactivated
         * or requeued before executing the next function, which
-        * resets all in-service entites as no more in service.
+        * resets all in-service entities as no more in service. This
+        * may cause bfqq to be freed. If this happens, the next
+        * function returns true.
         */
-       __bfq_bfqd_reset_in_service(bfqd);
+       return __bfq_bfqd_reset_in_service(bfqd);
 }
 
 /**
@@ -3262,7 +3264,6 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
        bool slow;
        unsigned long delta = 0;
        struct bfq_entity *entity = &bfqq->entity;
-       int ref;
 
        /*
         * Check whether the process is slow (see bfq_bfqq_is_slow).
@@ -3347,10 +3348,8 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
         * reason.
         */
        __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
-       ref = bfqq->ref;
-       __bfq_bfqq_expire(bfqd, bfqq);
-
-       if (ref == 1) /* bfqq is gone, no more actions on it */
+       if (__bfq_bfqq_expire(bfqd, bfqq))
+               /* bfqq is gone, no more actions on it */
                return;
 
        bfqq->injected_service = 0;
@@ -5397,7 +5396,7 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
        return min_shallow;
 }
 
-static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
 {
        struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
        struct blk_mq_tags *tags = hctx->sched_tags;
@@ -5405,6 +5404,11 @@ static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
 
        min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags);
        sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow);
+}
+
+static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+{
+       bfq_depth_updated(hctx);
        return 0;
 }
 
@@ -5827,6 +5831,7 @@ static struct elevator_type iosched_bfq_mq = {
                .requests_merged        = bfq_requests_merged,
                .request_merged         = bfq_request_merged,
                .has_work               = bfq_has_work,
+               .depth_updated          = bfq_depth_updated,
                .init_hctx              = bfq_init_hctx,
                .init_sched             = bfq_init_queue,
                .exit_sched             = bfq_exit_queue,
index 062e1c4787f4a9e66ac4df54d24c17d9f92c6577..86394e503ca9c0487a66d40deaa09643148ae3df 100644 (file)
@@ -995,7 +995,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity,
                             bool ins_into_idle_tree);
 bool next_queue_may_preempt(struct bfq_data *bfqd);
 struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd);
-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
+bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
 void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                         bool ins_into_idle_tree, bool expiration);
 void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
index 63311d1ff1edf41823ef2790ac3175535d2f2ef5..ae4d000ac0af1c38a49c28e824ca843bfb3c531d 100644 (file)
@@ -1012,7 +1012,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
                entity->on_st = true;
        }
 
-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
        if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
                struct bfq_group *bfqg =
                        container_of(entity, struct bfq_group, entity);
@@ -1605,7 +1605,8 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
        return bfqq;
 }
 
-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+/* returns true if the in-service queue gets freed */
+bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
 {
        struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
        struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
@@ -1629,8 +1630,20 @@ void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
         * service tree either, then release the service reference to
         * the queue it represents (taken with bfq_get_entity).
         */
-       if (!in_serv_entity->on_st)
+       if (!in_serv_entity->on_st) {
+               /*
+                * If no process is referencing in_serv_bfqq any
+                * longer, then the service reference may be the only
+                * reference to the queue. If this is the case, then
+                * bfqq gets freed here.
+                */
+               int ref = in_serv_bfqq->ref;
                bfq_put_queue(in_serv_bfqq);
+               if (ref == 1)
+                       return true;
+       }
+
+       return false;
 }
 
 void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
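
[The three bfq hunks above close a use-after-free window: __bfq_bfqd_reset_in_service() may drop the last reference to the in-service queue, so it now returns true when that happens and bfq_bfqq_expire() stops touching bfqq immediately. A compact userspace sketch of the pattern, with hypothetical names (obj, put_obj, reset_in_service):]

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
            int ref;
    };

    /* Drop one reference; free on the last put. */
    static void put_obj(struct obj *o)
    {
            if (--o->ref == 0)
                    free(o);
    }

    /* Returns nonzero if the put freed the object, so the caller
     * knows not to dereference it afterwards -- the contract the
     * hunk gives __bfq_bfqd_reset_in_service(). */
    static int reset_in_service(struct obj *o)
    {
            int ref = o->ref;       /* sample before the put */

            put_obj(o);
            return ref == 1;        /* ours was the last reference */
    }

    int main(void)
    {
            struct obj *o = malloc(sizeof(*o));

            if (!o)
                    return 1;
            o->ref = 1;
            if (reset_in_service(o))
                    printf("freed: no further access allowed\n");
            return 0;
    }
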
index 71a78d9fb8b722d767f9062813e344a3bc8b90b7..716510ecd7ffa3f0535b49e3a443ea1ebe38ec12 100644 (file)
@@ -849,20 +849,14 @@ static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
        size = bio_add_page(bio, bv->bv_page, len,
                                bv->bv_offset + iter->iov_offset);
        if (size == len) {
-               struct page *page;
-               int i;
+               if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
+                       struct page *page;
+                       int i;
+
+                       mp_bvec_for_each_page(page, bv, i)
+                               get_page(page);
+               }
 
-               /*
-                * For the normal O_DIRECT case, we could skip grabbing this
-                * reference and then not have to put them again when IO
-                * completes. But this breaks some in-kernel users, like
-                * splicing to/from a loop device, where we release the pipe
-                * pages unconditionally. If we can fix that case, we can
-                * get rid of the get here and the need to call
-                * bio_release_pages() at IO completion time.
-                */
-               mp_bvec_for_each_page(page, bv, i)
-                       get_page(page);
                iov_iter_advance(iter, size);
                return 0;
        }
@@ -925,10 +919,12 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
  * This takes either an iterator pointing to user memory, or one pointing to
  * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
  * map them into the kernel. On IO completion, the caller should put those
- * pages. For now, when adding kernel pages, we still grab a reference to the
- * page. This isn't strictly needed for the common case, but some call paths
- * end up releasing pages from eg a pipe and we can't easily control these.
- * See comment in __bio_iov_bvec_add_pages().
+ * pages. If we're adding kernel pages, and the caller told us it's safe to
+ * do so, we just have to add the pages to the bio directly. We don't grab an
+ * extra reference to those pages (the user should already have that), and we
+ * don't put the page on IO completion. The caller needs to check if the bio is
+ * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
+ * released.
  *
  * The function tries, but does not guarantee, to pin as many pages as
  * fit into the bio, or are requested in *iter, whatever is smaller. If
@@ -940,6 +936,13 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
        const bool is_bvec = iov_iter_is_bvec(iter);
        unsigned short orig_vcnt = bio->bi_vcnt;
 
+       /*
+        * If this is a BVEC iter, then the pages are kernel pages. Don't
+        * release them on IO completion, if the caller asked us to.
+        */
+       if (is_bvec && iov_iter_bvec_no_ref(iter))
+               bio_set_flag(bio, BIO_NO_PAGE_REF);
+
        do {
                int ret;
 
@@ -1295,8 +1298,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
                        }
                }
 
-               if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
+               if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
+                       if (!map_data)
+                               __free_page(page);
                        break;
+               }
 
                len -= bytes;
                offset = 0;
@@ -1696,7 +1702,8 @@ static void bio_dirty_fn(struct work_struct *work)
                next = bio->bi_private;
 
                bio_set_pages_dirty(bio);
-               bio_release_pages(bio);
+               if (!bio_flagged(bio, BIO_NO_PAGE_REF))
+                       bio_release_pages(bio);
                bio_put(bio);
        }
 }
@@ -1713,7 +1720,8 @@ void bio_check_pages_dirty(struct bio *bio)
                        goto defer;
        }
 
-       bio_release_pages(bio);
+       if (!bio_flagged(bio, BIO_NO_PAGE_REF))
+               bio_release_pages(bio);
        bio_put(bio);
        return;
 defer:
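
[The bio hunks above introduce BIO_NO_PAGE_REF: when a BVEC iterator carries caller-owned kernel pages and the caller signals that via iov_iter_bvec_no_ref(), the bio takes no page references, and every completion path checks the flag before calling bio_release_pages(). A small sketch of flag-guarded release, under assumed names (NO_REF, release_pages):]

    #include <stdio.h>

    #define NO_REF  (1u << 0)   /* caller keeps page ownership */

    struct buf {
            unsigned int flags;
    };

    static void release_pages(struct buf *b)
    {
            (void)b;
            printf("dropping page references\n");
    }

    /* Completion: only drop references this layer actually took. */
    static void complete(struct buf *b)
    {
            if (!(b->flags & NO_REF))
                    release_pages(b);
    }

    int main(void)
    {
            struct buf caller_owned = { .flags = NO_REF };
            struct buf referenced   = { .flags = 0 };

            complete(&caller_owned);    /* silent: nothing to drop */
            complete(&referenced);      /* drops the reference */
            return 0;
    }
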
index 77f37ef8ef06a68da12d380eec7f790030313ad4..617a2b3f758219b3dc7d9b6a592a68eea8edb060 100644 (file)
@@ -1736,8 +1736,8 @@ out:
 
 /**
  * blkcg_schedule_throttle - this task needs to check for throttling
- * @q - the request queue IO was submitted on
- * @use_memdelay - do we charge this to memory delay for PSI
+ * @q: the request queue IO was submitted on
+ * @use_memdelay: do we charge this to memory delay for PSI
  *
  * This is called by the IO controller when we know there's delay accumulated
  * for the blkg for this task.  We do not pass the blkg because there are places
@@ -1769,8 +1769,9 @@ void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
 
 /**
  * blkcg_add_delay - add delay to this blkg
- * @now - the current time in nanoseconds
- * @delta - how many nanoseconds of delay to add
+ * @blkg: blkg of interest
+ * @now: the current time in nanoseconds
+ * @delta: how many nanoseconds of delay to add
  *
  * Charge @delta to the blkg's current delay accumulation.  This is used to
  * throttle tasks if an IO controller thinks we need more throttling.
index 4673ebe4225534dc9965089ba76ad127963dcb0f..a55389ba877964e5ad69f173ed0a3fa001bb9936 100644 (file)
@@ -1245,8 +1245,6 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
  */
 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 {
-       blk_qc_t unused;
-
        if (blk_cloned_rq_check_limits(q, rq))
                return BLK_STS_IOERR;
 
@@ -1262,7 +1260,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
         * bypass a potential scheduler on the bottom device for
         * insert.
         */
-       return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, true);
+       return blk_mq_request_issue_directly(rq, true);
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
index 6e0f2d97fc6d8f0a5b14e6dbea23f817706bef7a..d95f9489201526081abf8b8bdcc65a525cf4c179 100644 (file)
@@ -220,7 +220,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
                blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
                flush_rq->tag = -1;
        } else {
-               blk_mq_put_driver_tag_hctx(hctx, flush_rq);
+               blk_mq_put_driver_tag(flush_rq);
                flush_rq->internal_tag = -1;
        }
 
@@ -324,7 +324,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 
        if (q->elevator) {
                WARN_ON(rq->tag < 0);
-               blk_mq_put_driver_tag_hctx(hctx, rq);
+               blk_mq_put_driver_tag(rq);
        }
 
        /*
index 2620baa1f6993db5e6706a36240ab4c6a2931038..507212d75ee2c473c1ca8fd38f9eeedc1d6894ee 100644 (file)
@@ -75,6 +75,7 @@
 #include <linux/blk-mq.h>
 #include "blk-rq-qos.h"
 #include "blk-stat.h"
+#include "blk.h"
 
 #define DEFAULT_SCALE_COOKIE 1000000U
 
index 40905539afed347ebb7882d7e02c824c096e16ce..aa6bc5c0264388a549956c3f8acb57c1d144fb5f 100644 (file)
@@ -423,10 +423,12 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
                 * busy in case of 'none' scheduler, and this way may save
                 * us one extra enqueue & dequeue to sw queue.
                 */
-               if (!hctx->dispatch_busy && !e && !run_queue_async)
+               if (!hctx->dispatch_busy && !e && !run_queue_async) {
                        blk_mq_try_issue_list_directly(hctx, list);
-               else
-                       blk_mq_insert_requests(hctx, ctx, list);
+                       if (list_empty(list))
+                               return;
+               }
+               blk_mq_insert_requests(hctx, ctx, list);
        }
 
        blk_mq_run_hw_queue(hctx, run_queue_async);
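
[The scheduler-insert hunk above turns the 'none'-scheduler fast path into try-then-fallback: direct issue consumes what the device accepts, only a now-empty list returns early, and any leftovers go through the normal insert path. A sketch of that shape over a plain array — issue_directly() and its busy condition are made up:]

    #include <stdio.h>

    #define N 4

    /* Stand-in for direct issue: accepts a prefix of the list until
     * the (pretend) device reports busy; returns how many it took. */
    static int issue_directly(const int *reqs, int n)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (reqs[i] % 2)        /* odd ids: device busy */
                            break;
            return i;
    }

    static void insert(int req)
    {
            printf("request %d inserted for later dispatch\n", req);
    }

    int main(void)
    {
            int reqs[N] = { 2, 4, 5, 7 };
            int done = issue_directly(reqs, N);

            /* Fallback: leftovers take the normal insert path
             * instead of being issued blindly or dropped. */
            for (; done < N; done++)
                    insert(reqs[done]);
            return 0;
    }
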
index a9c181603cbda2df9f9b59d9c447b19d3f9c2889..fc60ed7e940ead5ae7d7332ee9f64b9ffe922aca 100644 (file)
@@ -59,7 +59,8 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
 }
 
 /*
- * Check if any of the ctx's have pending work in this hardware queue
+ * Check if any of the ctx, dispatch list or elevator
+ * have pending work in this hardware queue.
  */
 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
 {
@@ -653,6 +654,13 @@ bool blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
+void blk_mq_complete_request_sync(struct request *rq)
+{
+       WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+       rq->q->mq_ops->complete(rq);
+}
+EXPORT_SYMBOL_GPL(blk_mq_complete_request_sync);
+
 int blk_mq_request_started(struct request *rq)
 {
        return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
@@ -782,7 +790,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
        if (kick_requeue_list)
                blk_mq_kick_requeue_list(q);
 }
-EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
 
 void blk_mq_kick_requeue_list(struct request_queue *q)
 {
@@ -1072,7 +1079,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
        hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
 
        spin_lock(&hctx->dispatch_wait_lock);
-       list_del_init(&wait->entry);
+       if (!list_empty(&wait->entry)) {
+               struct sbitmap_queue *sbq;
+
+               list_del_init(&wait->entry);
+               sbq = &hctx->tags->bitmap_tags;
+               atomic_dec(&sbq->ws_active);
+       }
        spin_unlock(&hctx->dispatch_wait_lock);
 
        blk_mq_run_hw_queue(hctx, true);
@@ -1088,13 +1101,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
                                 struct request *rq)
 {
+       struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
        struct wait_queue_head *wq;
        wait_queue_entry_t *wait;
        bool ret;
 
        if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) {
-               if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
-                       set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+               blk_mq_sched_mark_restart_hctx(hctx);
 
                /*
                 * It's possible that a tag was freed in the window between the
@@ -1111,7 +1124,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
        if (!list_empty_careful(&wait->entry))
                return false;
 
-       wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait;
+       wq = &bt_wait_ptr(sbq, hctx)->wait;
 
        spin_lock_irq(&wq->lock);
        spin_lock(&hctx->dispatch_wait_lock);
@@ -1121,6 +1134,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
                return false;
        }
 
+       atomic_inc(&sbq->ws_active);
        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(wq, wait);
 
@@ -1141,6 +1155,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
         * someone else gets the wakeup.
         */
        list_del_init(&wait->entry);
+       atomic_dec(&sbq->ws_active);
        spin_unlock(&hctx->dispatch_wait_lock);
        spin_unlock_irq(&wq->lock);
 
@@ -1703,11 +1718,12 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
        unsigned int depth;
 
        list_splice_init(&plug->mq_list, &list);
-       plug->rq_count = 0;
 
        if (plug->rq_count > 2 && plug->multiple_queues)
                list_sort(NULL, &list, plug_rq_cmp);
 
+       plug->rq_count = 0;
+
        this_q = NULL;
        this_hctx = NULL;
        this_ctx = NULL;
@@ -1792,74 +1808,76 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
        return ret;
 }
 
-blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
                                                struct request *rq,
                                                blk_qc_t *cookie,
-                                               bool bypass, bool last)
+                                               bool bypass_insert, bool last)
 {
        struct request_queue *q = rq->q;
        bool run_queue = true;
-       blk_status_t ret = BLK_STS_RESOURCE;
-       int srcu_idx;
-       bool force = false;
 
-       hctx_lock(hctx, &srcu_idx);
        /*
-        * hctx_lock is needed before checking quiesced flag.
+        * RCU or SRCU read lock is needed before checking quiesced flag.
         *
-        * When queue is stopped or quiesced, ignore 'bypass', insert
-        * and return BLK_STS_OK to caller, and avoid driver to try to
-        * dispatch again.
+        * When queue is stopped or quiesced, ignore 'bypass_insert' from
+        * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
+        * and avoid driver to try to dispatch again.
         */
-       if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
+       if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
                run_queue = false;
-               bypass = false;
-               goto out_unlock;
+               bypass_insert = false;
+               goto insert;
        }
 
-       if (unlikely(q->elevator && !bypass))
-               goto out_unlock;
+       if (q->elevator && !bypass_insert)
+               goto insert;
 
        if (!blk_mq_get_dispatch_budget(hctx))
-               goto out_unlock;
+               goto insert;
 
        if (!blk_mq_get_driver_tag(rq)) {
                blk_mq_put_dispatch_budget(hctx);
-               goto out_unlock;
+               goto insert;
        }
 
-       /*
-        * Always add a request that has been through
-        *.queue_rq() to the hardware dispatch list.
-        */
-       force = true;
-       ret = __blk_mq_issue_directly(hctx, rq, cookie, last);
-out_unlock:
+       return __blk_mq_issue_directly(hctx, rq, cookie, last);
+insert:
+       if (bypass_insert)
+               return BLK_STS_RESOURCE;
+
+       blk_mq_request_bypass_insert(rq, run_queue);
+       return BLK_STS_OK;
+}
+
+static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+               struct request *rq, blk_qc_t *cookie)
+{
+       blk_status_t ret;
+       int srcu_idx;
+
+       might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
+
+       hctx_lock(hctx, &srcu_idx);
+
+       ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
+       if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
+               blk_mq_request_bypass_insert(rq, true);
+       else if (ret != BLK_STS_OK)
+               blk_mq_end_request(rq, ret);
+
+       hctx_unlock(hctx, srcu_idx);
+}
+
+blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
+{
+       blk_status_t ret;
+       int srcu_idx;
+       blk_qc_t unused_cookie;
+       struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+
+       hctx_lock(hctx, &srcu_idx);
+       ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
        hctx_unlock(hctx, srcu_idx);
-       switch (ret) {
-       case BLK_STS_OK:
-               break;
-       case BLK_STS_DEV_RESOURCE:
-       case BLK_STS_RESOURCE:
-               if (force) {
-                       blk_mq_request_bypass_insert(rq, run_queue);
-                       /*
-                        * We have to return BLK_STS_OK for the DM
-                        * to avoid livelock. Otherwise, we return
-                        * the real result to indicate whether the
-                        * request is direct-issued successfully.
-                        */
-                       ret = bypass ? BLK_STS_OK : ret;
-               } else if (!bypass) {
-                       blk_mq_sched_insert_request(rq, false,
-                                                   run_queue, false);
-               }
-               break;
-       default:
-               if (!bypass)
-                       blk_mq_end_request(rq, ret);
-               break;
-       }
 
        return ret;
 }
@@ -1867,20 +1885,22 @@ out_unlock:
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                struct list_head *list)
 {
-       blk_qc_t unused;
-       blk_status_t ret = BLK_STS_OK;
-
        while (!list_empty(list)) {
+               blk_status_t ret;
                struct request *rq = list_first_entry(list, struct request,
                                queuelist);
 
                list_del_init(&rq->queuelist);
-               if (ret == BLK_STS_OK)
-                       ret = blk_mq_try_issue_directly(hctx, rq, &unused,
-                                                       false,
+               ret = blk_mq_request_issue_directly(rq, list_empty(list));
+               if (ret != BLK_STS_OK) {
+                       if (ret == BLK_STS_RESOURCE ||
+                                       ret == BLK_STS_DEV_RESOURCE) {
+                               blk_mq_request_bypass_insert(rq,
                                                        list_empty(list));
-               else
-                       blk_mq_sched_insert_request(rq, false, true, false);
+                               break;
+                       }
+                       blk_mq_end_request(rq, ret);
+               }
        }
 
        /*
@@ -1888,7 +1908,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
         * the driver there was more coming, but that turned out to
         * be a lie.
         */
-       if (ret != BLK_STS_OK && hctx->queue->mq_ops->commit_rqs)
+       if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
                hctx->queue->mq_ops->commit_rqs(hctx);
 }
 
@@ -1995,19 +2015,21 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                        plug->rq_count--;
                }
                blk_add_rq_to_plug(plug, rq);
+               trace_block_plug(q);
 
                blk_mq_put_ctx(data.ctx);
 
                if (same_queue_rq) {
                        data.hctx = same_queue_rq->mq_hctx;
+                       trace_block_unplug(q, 1, true);
                        blk_mq_try_issue_directly(data.hctx, same_queue_rq,
-                                       &cookie, false, true);
+                                       &cookie);
                }
        } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
                        !data.hctx->dispatch_busy)) {
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
-               blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);
+               blk_mq_try_issue_directly(data.hctx, rq, &cookie);
        } else {
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
@@ -2324,7 +2346,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
        return 0;
 
  free_fq:
-       kfree(hctx->fq);
+       blk_free_flush_queue(hctx->fq);
  exit_hctx:
        if (set->ops->exit_hctx)
                set->ops->exit_hctx(hctx, hctx_idx);
@@ -2857,7 +2879,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        /*
         * Default to classic polling
         */
-       q->poll_nsec = -1;
+       q->poll_nsec = BLK_MQ_POLL_CLASSIC;
 
        blk_mq_init_cpu_queues(q, set->nr_hw_queues);
        blk_mq_add_queue_tag_set(set, q);
@@ -3113,6 +3135,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
                }
                if (ret)
                        break;
+               if (q->elevator && q->elevator->type->ops.depth_updated)
+                       q->elevator->type->ops.depth_updated(hctx);
        }
 
        if (!ret)
@@ -3392,7 +3416,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
 {
        struct request *rq;
 
-       if (q->poll_nsec == -1)
+       if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
                return false;
 
        if (!blk_qc_t_is_internal(cookie))
index c11353a3749dc3422cb564e848e7225634414a6c..423ea88ab6fbaac08b4fe1367e6a778fdb70641a 100644 (file)
@@ -41,6 +41,8 @@ void blk_mq_free_queue(struct request_queue *q);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
 void blk_mq_wake_waiters(struct request_queue *q);
 bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
+                               bool kick_requeue_list);
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 bool blk_mq_get_driver_tag(struct request *rq);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
@@ -68,10 +70,8 @@ void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                                struct list_head *list);
 
-blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
-                                               struct request *rq,
-                                               blk_qc_t *cookie,
-                                               bool bypass, bool last);
+/* Used by blk_insert_cloned_request() to issue request directly */
+blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                                    struct list_head *list);
 
@@ -222,15 +222,6 @@ static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
        }
 }
 
-static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
-                                      struct request *rq)
-{
-       if (rq->tag == -1 || rq->internal_tag == -1)
-               return;
-
-       __blk_mq_put_driver_tag(hctx, rq);
-}
-
 static inline void blk_mq_put_driver_tag(struct request *rq)
 {
        if (rq->tag == -1 || rq->internal_tag == -1)
index 59685918167e51d80a2808861b3f2f7744dec3a7..422327089e0fd963dbfaf346be68f7951da15df2 100644 (file)
@@ -360,8 +360,8 @@ static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
 {
        int val;
 
-       if (q->poll_nsec == -1)
-               val = -1;
+       if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
+               val = BLK_MQ_POLL_CLASSIC;
        else
                val = q->poll_nsec / 1000;
 
@@ -380,10 +380,12 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
        if (err < 0)
                return err;
 
-       if (val == -1)
-               q->poll_nsec = -1;
-       else
+       if (val == BLK_MQ_POLL_CLASSIC)
+               q->poll_nsec = BLK_MQ_POLL_CLASSIC;
+       else if (val >= 0)
                q->poll_nsec = val * 1000;
+       else
+               return -EINVAL;
 
        return count;
 }
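
[Both poll_nsec hunks above replace the bare -1 sentinel with the named constant BLK_MQ_POLL_CLASSIC, and the sysfs store now rejects other negative values instead of silently storing them. A sketch of that parse-and-validate shape — userspace, with a hypothetical store_poll_delay():]

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define POLL_CLASSIC  (-1)      /* named sentinel, not a bare -1 */

    static long poll_nsec = POLL_CLASSIC;

    /* Accept the sentinel or a non-negative delay in usec; reject
     * every other negative value -- the shape of the sysfs store. */
    static int store_poll_delay(const char *page)
    {
            long val = strtol(page, NULL, 10);

            if (val == POLL_CLASSIC)
                    poll_nsec = POLL_CLASSIC;
            else if (val >= 0)
                    poll_nsec = val * 1000;
            else
                    return -EINVAL;
            return 0;
    }

    int main(void)
    {
            printf("%d\n", store_poll_delay("-1"));  /* 0: classic poll */
            printf("%d\n", store_poll_delay("100")); /* 0: 100 usec     */
            printf("%d\n", store_poll_delay("-5"));  /* -EINVAL         */
            return 0;
    }
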
index 0430ccd08728655ecf4e44c65ef16e956bd928c1..08a0e458bc3e62dcb17e0dbec9151290b895aa33 100644 (file)
@@ -212,8 +212,12 @@ static void crypt_done(struct crypto_async_request *areq, int err)
 {
        struct skcipher_request *req = areq->data;
 
-       if (!err)
+       if (!err) {
+               struct rctx *rctx = skcipher_request_ctx(req);
+
+               rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
                err = xor_tweak_post(req);
+       }
 
        skcipher_request_complete(req, err);
 }
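
[The crypt_done() fix above (mirrored for lrw further down) clears CRYPTO_TFM_REQ_MAY_SLEEP before running xor_tweak_post(): async completions can fire in softirq context, where the sleeping behavior the submitter requested is no longer permitted. A tiny sketch of demoting a may-sleep flag on entry to atomic context — flag and function names are made up:]

    #include <stdio.h>

    #define REQ_MAY_SLEEP  (1u << 0)

    struct req {
            unsigned int flags;
    };

    static void do_post_processing(struct req *r)
    {
            /* In the kernel analogue this picks GFP_KERNEL vs
             * GFP_ATOMIC for any allocations made here. */
            printf(r->flags & REQ_MAY_SLEEP ? "may sleep\n"
                                            : "atomic only\n");
    }

    /* Completion callback: may run in softirq context, so sleeping
     * is off the table regardless of what the submitter set. */
    static void completion_cb(struct req *r)
    {
            r->flags &= ~REQ_MAY_SLEEP;
            do_post_processing(r);
    }

    int main(void)
    {
            struct req r = { .flags = REQ_MAY_SLEEP };

            completion_cb(&r);      /* prints "atomic only" */
            return 0;
    }
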
index f267633cf13ac83e81d7c1e8c7462e18228a2e38..d18a37629f0537271fcfd95912e0b9f1eac2133e 100644 (file)
@@ -5634,7 +5634,49 @@ static const struct hash_testvec poly1305_tv_template[] = {
                .psize          = 80,
                .digest         = "\x13\x00\x00\x00\x00\x00\x00\x00"
                                  "\x00\x00\x00\x00\x00\x00\x00\x00",
-       },
+       }, { /* Regression test for overflow in AVX2 implementation */
+               .plaintext      = "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
+                                 "\xff\xff\xff\xff",
+               .psize          = 300,
+               .digest         = "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8"
+                                 "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1",
+       }
 };
 
 /* NHPoly1305 test vectors from https://github.com/google/adiantum */
index 847f54f7678972dcc50c06b867ac2842a7535b4a..2f948328cabbd97f8504941b4fd50cce1122b086 100644 (file)
@@ -137,8 +137,12 @@ static void crypt_done(struct crypto_async_request *areq, int err)
 {
        struct skcipher_request *req = areq->data;
 
-       if (!err)
+       if (!err) {
+               struct rctx *rctx = skcipher_request_ctx(req);
+
+               rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
                err = xor_tweak_post(req);
+       }
 
        skcipher_request_complete(req, err);
 }
index 8638f43cfc3d87184c9a0cc91318f07ab5abff8a..79d86da1c8924a971bacf928008634c8aa3224ca 100644 (file)
@@ -186,6 +186,10 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
                }
        }
 
+       if (obj_desc->common.type == ACPI_TYPE_REGION) {
+               acpi_ut_remove_address_range(obj_desc->region.space_id, node);
+       }
+
        /* Clear the Node entry in all cases */
 
        node->object = NULL;
index 6ecbbabf12330c316d3e28cdbef52e72548b6ef3..eec263c9019e4bd9d200cf6903857f620da962fb 100644 (file)
@@ -1043,9 +1043,6 @@ void __init acpi_early_init(void)
 
        acpi_permanent_mmap = true;
 
-       /* Initialize debug output. Linux does not use ACPICA defaults */
-       acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR;
-
 #ifdef CONFIG_X86
        /*
         * If the machine falls into the DMI check table,
index 1b207fca1420bbc5c40469e1b3558cb38a57c450..d4244e7d0e38f05cef4e5b97df55d3a9946837c1 100644 (file)
@@ -1150,8 +1150,13 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
        cpc_read(cpunum, nominal_reg, &nom);
        perf_caps->nominal_perf = nom;
 
-       cpc_read(cpunum, guaranteed_reg, &guaranteed);
-       perf_caps->guaranteed_perf = guaranteed;
+       if (guaranteed_reg->type != ACPI_TYPE_BUFFER  ||
+           IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
+               perf_caps->guaranteed_perf = 0;
+       } else {
+               cpc_read(cpunum, guaranteed_reg, &guaranteed);
+               perf_caps->guaranteed_perf = guaranteed;
+       }
 
        cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
        perf_caps->lowest_nonlinear_perf = min_nonlinear;
index 5a389a4f4f652edda26c109baf5e595bf6325903..f1ed0befe303d241c4537e446daad3726e62dbb4 100644 (file)
@@ -567,6 +567,12 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                goto out;
        }
 
+       dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
+                       cmd_name, out_obj->buffer.length);
+       print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
+                       out_obj->buffer.pointer,
+                       min_t(u32, 128, out_obj->buffer.length), true);
+
        if (call_pkg) {
                call_pkg->nd_fw_size = out_obj->buffer.length;
                memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
@@ -585,12 +591,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                return 0;
        }
 
-       dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
-                       cmd_name, out_obj->buffer.length);
-       print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
-                       out_obj->buffer.pointer,
-                       min_t(u32, 128, out_obj->buffer.length), true);
-
        for (i = 0, offset = 0; i < desc->out_num; i++) {
                u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
                                (u32 *) out_obj->buffer.pointer,
index f70de71f79d6a699442f430dfa6606ad18a8f2dc..cddd0fcf622c3314f7115e86124e9dde5a5f98ff 100644 (file)
@@ -122,9 +122,8 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
        if (!test_bit(cmd, &nfit_mem->dsm_mask))
                return -ENOTTY;
 
-       if (old_data)
-               memcpy(nd_cmd.cmd.old_pass, old_data->data,
-                               sizeof(nd_cmd.cmd.old_pass));
+       memcpy(nd_cmd.cmd.old_pass, old_data->data,
+                       sizeof(nd_cmd.cmd.old_pass));
        memcpy(nd_cmd.cmd.new_pass, new_data->data,
                        sizeof(nd_cmd.cmd.new_pass));
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -336,9 +335,8 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
 
        /* flush all cache before we erase DIMM */
        nvdimm_invalidate_cache();
-       if (nkey)
-               memcpy(nd_cmd.cmd.passphrase, nkey->data,
-                               sizeof(nd_cmd.cmd.passphrase));
+       memcpy(nd_cmd.cmd.passphrase, nkey->data,
+                       sizeof(nd_cmd.cmd.passphrase));
        rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
        if (rc < 0)
                return rc;
index 78db97687f26a1512130ffadda01f3372dd4ae34..c4b06cc075f937f8a4c8b4c7b76cb3344b25d0f0 100644 (file)
@@ -800,6 +800,7 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
        match.hrv = hrv;
 
        dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
+       put_device(dev);
        return !!dev;
 }
 EXPORT_SYMBOL(acpi_dev_present);
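
[The one-line fix above pairs bus_find_device(), which hands back its match with an elevated reference count, with put_device() before only the boolean result is returned, closing a device refcount leak. A minimal get/put sketch — toy refcount API, not the driver core:]

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct device {
            int ref;
    };

    static struct device registry = { .ref = 1 };

    /* Lookup returns its match with an extra reference, the way
     * bus_find_device() does. */
    static struct device *find_device(bool match)
    {
            if (!match)
                    return NULL;
            registry.ref++;
            return &registry;
    }

    static void put_device(struct device *dev)
    {
            if (dev)
                    dev->ref--;
    }

    /* Only a yes/no answer is needed, so the reference taken by the
     * lookup must be dropped before returning. */
    static bool dev_present(bool match)
    {
            struct device *dev = find_device(match);

            put_device(dev);
            return dev != NULL;
    }

    int main(void)
    {
            bool present = dev_present(true);

            printf("present=%d ref=%d\n", present, registry.ref);
            return 0;
    }
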
index 8685882da64cdaf60dcbac09d9c61735905b5300..4b9c7ca492e6db85dad979a67c7baed7cedd972d 100644 (file)
@@ -2057,7 +2057,8 @@ static size_t binder_get_object(struct binder_proc *proc,
        size_t object_size = 0;
 
        read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
-       if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32)))
+       if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+           !IS_ALIGNED(offset, sizeof(u32)))
                return 0;
        binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
                                      offset, read_size);
index 6389467670a0bc171522a2035ae4788bb700d616..195f120c4e8c9aefa9f6e57e8ce400a8ddde95fb 100644 (file)
@@ -927,14 +927,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+
+       mm = alloc->vma_vm_mm;
+       if (!mmget_not_zero(mm))
+               goto err_mmget;
+       if (!down_write_trylock(&mm->mmap_sem))
+               goto err_down_write_mmap_sem_failed;
        vma = binder_alloc_get_vma(alloc);
-       if (vma) {
-               if (!mmget_not_zero(alloc->vma_vm_mm))
-                       goto err_mmget;
-               mm = alloc->vma_vm_mm;
-               if (!down_read_trylock(&mm->mmap_sem))
-                       goto err_down_write_mmap_sem_failed;
-       }
 
        list_lru_isolate(lru, item);
        spin_unlock(lock);
@@ -945,10 +944,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
                zap_page_range(vma, page_addr, PAGE_SIZE);
 
                trace_binder_unmap_user_end(alloc, index);
-
-               up_read(&mm->mmap_sem);
-               mmput(mm);
        }
+       up_write(&mm->mmap_sem);
+       mmput(mm);
 
        trace_binder_unmap_kernel_start(alloc, index);
 
index b3ed8f9953a862ea3ae67ef065ca5469330a44e0..173e6f2dd9af0f12afdc1fee7e372cfa4291e0aa 100644 (file)
@@ -52,38 +52,52 @@ static int eject_tray(struct ata_device *dev)
 /* Per the spec, only slot type and drawer type ODD can be supported */
 static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
 {
-       char buf[16];
+       char *buf;
        unsigned int ret;
-       struct rm_feature_desc *desc = (void *)(buf + 8);
+       struct rm_feature_desc *desc;
        struct ata_taskfile tf;
        static const char cdb[] = {  GPCMD_GET_CONFIGURATION,
                        2,      /* only 1 feature descriptor requested */
                        0, 3,   /* 3, removable medium feature */
                        0, 0, 0,/* reserved */
-                       0, sizeof(buf),
+                       0, 16,
                        0, 0, 0,
        };
 
+       buf = kzalloc(16, GFP_KERNEL);
+       if (!buf)
+               return ODD_MECH_TYPE_UNSUPPORTED;
+       desc = (void *)(buf + 8);
+
        ata_tf_init(dev, &tf);
        tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf.command = ATA_CMD_PACKET;
        tf.protocol = ATAPI_PROT_PIO;
-       tf.lbam = sizeof(buf);
+       tf.lbam = 16;
 
        ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
-                               buf, sizeof(buf), 0);
-       if (ret)
+                               buf, 16, 0);
+       if (ret) {
+               kfree(buf);
                return ODD_MECH_TYPE_UNSUPPORTED;
+       }
 
-       if (be16_to_cpu(desc->feature_code) != 3)
+       if (be16_to_cpu(desc->feature_code) != 3) {
+               kfree(buf);
                return ODD_MECH_TYPE_UNSUPPORTED;
+       }
 
-       if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1)
+       if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) {
+               kfree(buf);
                return ODD_MECH_TYPE_SLOT;
-       else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1)
+       } else if (desc->mech_type == 1 && desc->load == 0 &&
+                  desc->eject == 1) {
+               kfree(buf);
                return ODD_MECH_TYPE_DRAWER;
-       else
+       } else {
+               kfree(buf);
                return ODD_MECH_TYPE_UNSUPPORTED;
+       }
 }
 
 /* Test if ODD is zero power ready by sense code */
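
[The zpodd hunk above moves the 16-byte GET CONFIGURATION buffer off the stack: buffers passed to ata_exec_internal() can be DMA targets, and DMA into stack memory is unsafe (and impossible with vmapped stacks). The hunk frees the allocation on every branch; a single exit label is the more common kernel shape for the same logic, sketched here with a placeholder query() and decode:]

    #include <stdlib.h>
    #include <string.h>

    enum mech { MECH_UNSUPPORTED, MECH_SLOT, MECH_DRAWER };

    /* Placeholder for the ATA query; fills the 16-byte buffer. */
    static int query(unsigned char *buf)
    {
            memset(buf, 0, 16);
            return 0;
    }

    static enum mech get_mech_type(void)
    {
            enum mech ret = MECH_UNSUPPORTED;
            /* Heap, never the stack: in the kernel case the buffer
             * may be a DMA target. */
            unsigned char *buf = calloc(1, 16);

            if (!buf)
                    return MECH_UNSUPPORTED;

            if (query(buf))
                    goto out;

            if (buf[8] == 0)        /* placeholder decode */
                    ret = MECH_SLOT;
    out:
            free(buf);              /* one free for every path */
            return ret;
    }

    int main(void)
    {
            return get_mech_type() == MECH_SLOT ? 0 : 1;
    }

[Whether per-branch kfree() or a goto-out label reads better is a style call; the label form makes it harder to miss a branch when the function grows.]
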
index 11e1663bdc4dee0e2cfd7cd9ba61783d00277bbf..b2c06da4f62e336ce262f1445e3329803da4d5cb 100644 (file)
@@ -1646,7 +1646,7 @@ static irqreturn_t fs_irq (int irq, void *dev_id)
        }
 
        if (status & ISR_TBRQ_W) {
-               fs_dprintk (FS_DEBUG_IRQ, "Data tramsitted!\n");
+               fs_dprintk (FS_DEBUG_IRQ, "Data transmitted!\n");
                process_txdone_queue (dev, &dev->tx_relq);
        }
 
index 57410f9c5d44cc11b9156159a49bacf6173168ab..c52c738e554a2dfe8e660973ce8a2685d9a1934b 100644 (file)
@@ -164,9 +164,7 @@ config ARM_CHARLCD
          line and the Linux version on the second line, but that's
          still useful.
 
-endif # AUXDISPLAY
-
-menuconfig PANEL
+menuconfig PARPORT_PANEL
        tristate "Parallel port LCD/Keypad Panel support"
        depends on PARPORT
        select CHARLCD
@@ -178,7 +176,7 @@ menuconfig PANEL
          compiled as a module, or linked into the kernel and started at boot.
          If you don't understand what all this is about, say N.
 
-if PANEL
+if PARPORT_PANEL
 
 config PANEL_PARPORT
        int "Default parallel port number (0=LPT1)"
@@ -419,8 +417,11 @@ config PANEL_LCD_PIN_BL
 
          Default for the 'BL' pin in custom profile is '0' (uncontrolled).
 
+endif # PARPORT_PANEL
+
 config PANEL_CHANGE_MESSAGE
        bool "Change LCD initialization message ?"
+       depends on CHARLCD
        default "n"
        ---help---
          This allows you to replace the boot message indicating the kernel version
@@ -444,7 +445,34 @@ config PANEL_BOOT_MESSAGE
          An empty message will only clear the display at driver init time. Any other
          printf()-formatted message is valid with newline and escape codes.
 
-endif # PANEL
+choice
+       prompt "Backlight initial state"
+       default CHARLCD_BL_FLASH
+
+       config CHARLCD_BL_OFF
+               bool "Off"
+               help
+                 Backlight is initially turned off
+
+       config CHARLCD_BL_ON
+               bool "On"
+               help
+                 Backlight is initially turned on
+
+       config CHARLCD_BL_FLASH
+               bool "Flash"
+               help
+                 Backlight is flashed briefly on init
+
+endchoice
+
+endif # AUXDISPLAY
+
+config PANEL
+       tristate "Parallel port LCD/Keypad Panel support (OLD OPTION)"
+       depends on PARPORT
+       select AUXDISPLAY
+       select PARPORT_PANEL
 
 config CHARLCD
        tristate "Character LCD core support" if COMPILE_TEST
index 7ac6776ca3f674683c538aca2d7473198e01a3a9..cf54b5efb07e00ae1249dfbc95ef4c88a0f08386 100644 (file)
@@ -10,4 +10,4 @@ obj-$(CONFIG_CFAG12864B)      += cfag12864b.o cfag12864bfb.o
 obj-$(CONFIG_IMG_ASCII_LCD)    += img-ascii-lcd.o
 obj-$(CONFIG_HD44780)          += hd44780.o
 obj-$(CONFIG_HT16K33)          += ht16k33.o
-obj-$(CONFIG_PANEL)             += panel.o
+obj-$(CONFIG_PARPORT_PANEL)    += panel.o
index 60e0b772673f3bd0c631efb6e45b0f624f75aa8a..92745efefb540e5d723f5bbaa3aefd11735b0f29 100644 (file)
@@ -91,7 +91,7 @@ struct charlcd_priv {
        unsigned long long drvdata[0];
 };
 
-#define to_priv(p)     container_of(p, struct charlcd_priv, lcd)
+#define charlcd_to_priv(p)     container_of(p, struct charlcd_priv, lcd)
 
 /* Device single-open policy control */
 static atomic_t charlcd_available = ATOMIC_INIT(1);
@@ -105,7 +105,7 @@ static void long_sleep(int ms)
 /* turn the backlight on or off */
 static void charlcd_backlight(struct charlcd *lcd, int on)
 {
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
 
        if (!lcd->ops->backlight)
                return;
@@ -134,7 +134,7 @@ static void charlcd_bl_off(struct work_struct *work)
 /* turn the backlight on for a little while */
 void charlcd_poke(struct charlcd *lcd)
 {
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
 
        if (!lcd->ops->backlight)
                return;
@@ -152,7 +152,7 @@ EXPORT_SYMBOL_GPL(charlcd_poke);
 
 static void charlcd_gotoxy(struct charlcd *lcd)
 {
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
        unsigned int addr;
 
        /*
@@ -170,7 +170,7 @@ static void charlcd_gotoxy(struct charlcd *lcd)
 
 static void charlcd_home(struct charlcd *lcd)
 {
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
 
        priv->addr.x = 0;
        priv->addr.y = 0;
@@ -179,7 +179,7 @@ static void charlcd_home(struct charlcd *lcd)
 
 static void charlcd_print(struct charlcd *lcd, char c)
 {
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
 
        if (priv->addr.x < lcd->bwidth) {
                if (lcd->char_conv)
@@ -211,7 +211,7 @@ static void charlcd_clear_fast(struct charlcd *lcd)
 /* clears the display and resets X/Y */
 static void charlcd_clear_display(struct charlcd *lcd)
 {
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
 
        lcd->ops->write_cmd(lcd, LCD_CMD_DISPLAY_CLEAR);
        priv->addr.x = 0;
@@ -223,7 +223,7 @@ static void charlcd_clear_display(struct charlcd *lcd)
 static int charlcd_init_display(struct charlcd *lcd)
 {
        void (*write_cmd_raw)(struct charlcd *lcd, int cmd);
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
        u8 init;
 
        if (lcd->ifwidth != 4 && lcd->ifwidth != 8)
@@ -369,7 +369,7 @@ static bool parse_xy(const char *s, unsigned long *x, unsigned long *y)
 
 static inline int handle_lcd_special_code(struct charlcd *lcd)
 {
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
 
        /* LCD special codes */
 
@@ -580,7 +580,7 @@ static inline int handle_lcd_special_code(struct charlcd *lcd)
 
 static void charlcd_write_char(struct charlcd *lcd, char c)
 {
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
 
        /* first, we'll test if we're in escape mode */
        if ((c != '\n') && priv->esc_seq.len >= 0) {
@@ -705,7 +705,7 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf,
 
 static int charlcd_open(struct inode *inode, struct file *file)
 {
-       struct charlcd_priv *priv = to_priv(the_charlcd);
+       struct charlcd_priv *priv = charlcd_to_priv(the_charlcd);
        int ret;
 
        ret = -EBUSY;
@@ -763,10 +763,24 @@ static void charlcd_puts(struct charlcd *lcd, const char *s)
        }
 }
 
+#ifdef CONFIG_PANEL_BOOT_MESSAGE
+#define LCD_INIT_TEXT CONFIG_PANEL_BOOT_MESSAGE
+#else
+#define LCD_INIT_TEXT "Linux-" UTS_RELEASE "\n"
+#endif
+
+#ifdef CONFIG_CHARLCD_BL_ON
+#define LCD_INIT_BL "\x1b[L+"
+#elif defined(CONFIG_CHARLCD_BL_FLASH)
+#define LCD_INIT_BL "\x1b[L*"
+#else
+#define LCD_INIT_BL "\x1b[L-"
+#endif
+
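For orientation, here is what the new macros expand to in the default configuration; the escape-code meanings below are read off the driver's special-code handler and are illustrative, not a spec:

    /* With CHARLCD_BL_FLASH selected and no custom boot message, the
     * concatenation in charlcd_init() reproduces the pre-patch literal:
     *
     *   "\x1b[Lc"                  cursor off
     *   "\x1b[Lb"                  blink off
     *   "\x1b[L*"                  flash the backlight (LCD_INIT_BL)
     *   "Linux-" UTS_RELEASE "\n"  boot text (LCD_INIT_TEXT)
     */
    charlcd_puts(lcd, "\x1b[Lc\x1b[Lb" LCD_INIT_BL LCD_INIT_TEXT);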
 /* initialize the LCD driver */
 static int charlcd_init(struct charlcd *lcd)
 {
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
        int ret;
 
        if (lcd->ops->backlight) {
@@ -784,13 +798,8 @@ static int charlcd_init(struct charlcd *lcd)
                return ret;
 
        /* display a short message */
-#ifdef CONFIG_PANEL_CHANGE_MESSAGE
-#ifdef CONFIG_PANEL_BOOT_MESSAGE
-       charlcd_puts(lcd, "\x1b[Lc\x1b[Lb\x1b[L*" CONFIG_PANEL_BOOT_MESSAGE);
-#endif
-#else
-       charlcd_puts(lcd, "\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE "\n");
-#endif
+       charlcd_puts(lcd, "\x1b[Lc\x1b[Lb" LCD_INIT_BL LCD_INIT_TEXT);
+
        /* clear the display on the next device opening */
        priv->must_clear = true;
        charlcd_home(lcd);
@@ -818,6 +827,12 @@ struct charlcd *charlcd_alloc(unsigned int drvdata_size)
 }
 EXPORT_SYMBOL_GPL(charlcd_alloc);
 
+void charlcd_free(struct charlcd *lcd)
+{
+       kfree(charlcd_to_priv(lcd));
+}
+EXPORT_SYMBOL_GPL(charlcd_free);
+
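With charlcd_free() exported, drivers release a charlcd through it instead of kfree(), so the enclosing charlcd_priv allocation is freed via container_of() rather than relying on the lcd pointer being the start of the allocation. A minimal probe/remove pairing, mirroring the hd44780 changes further down (driver names hypothetical):

    static int demo_probe(struct platform_device *pdev)
    {
    	struct charlcd *lcd;
    	int ret;

    	lcd = charlcd_alloc(0);
    	if (!lcd)
    		return -ENOMEM;

    	ret = charlcd_register(lcd);
    	if (ret) {
    		charlcd_free(lcd);	/* not kfree(lcd) */
    		return ret;
    	}

    	platform_set_drvdata(pdev, lcd);
    	return 0;
    }

    static int demo_remove(struct platform_device *pdev)
    {
    	struct charlcd *lcd = platform_get_drvdata(pdev);

    	charlcd_unregister(lcd);
    	charlcd_free(lcd);
    	return 0;
    }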
 static int panel_notify_sys(struct notifier_block *this, unsigned long code,
                            void *unused)
 {
@@ -866,7 +881,7 @@ EXPORT_SYMBOL_GPL(charlcd_register);
 
 int charlcd_unregister(struct charlcd *lcd)
 {
-       struct charlcd_priv *priv = to_priv(lcd);
+       struct charlcd_priv *priv = charlcd_to_priv(lcd);
 
        unregister_reboot_notifier(&panel_notifier);
        charlcd_puts(lcd, "\x0cLCD driver unloaded.\x1b[Lc\x1b[Lb\x1b[L-");
index 9ad93ea42fdc73e81242ea358ffb357612833367..ab15b64707ad22ef429a7ee6911d72ec9a284bd5 100644 (file)
@@ -271,7 +271,7 @@ static int hd44780_probe(struct platform_device *pdev)
        return 0;
 
 fail:
-       kfree(lcd);
+       charlcd_free(lcd);
        return ret;
 }
 
@@ -280,6 +280,8 @@ static int hd44780_remove(struct platform_device *pdev)
        struct charlcd *lcd = platform_get_drvdata(pdev);
 
        charlcd_unregister(lcd);
+
+       charlcd_free(lcd);
        return 0;
 }
 
index 21b9b2f2470a26d1f2d1c2d5eb4237fe3902af82..e06de63497cf8f00edde8d2d7bcffa5b25cc8e81 100644 (file)
@@ -1620,7 +1620,7 @@ err_lcd_unreg:
        if (lcd.enabled)
                charlcd_unregister(lcd.charlcd);
 err_unreg_device:
-       kfree(lcd.charlcd);
+       charlcd_free(lcd.charlcd);
        lcd.charlcd = NULL;
        parport_unregister_device(pprt);
        pprt = NULL;
@@ -1647,7 +1647,7 @@ static void panel_detach(struct parport *port)
        if (lcd.enabled) {
                charlcd_unregister(lcd.charlcd);
                lcd.initialized = false;
-               kfree(lcd.charlcd);
+               charlcd_free(lcd.charlcd);
                lcd.charlcd = NULL;
        }
 
index cb8347500ce2871e5003d8ce45a4014a97b8de3e..e49028a604295937a59761488d431b9f4837f731 100644 (file)
@@ -506,7 +506,7 @@ static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
 
        ret = lock_device_hotplug_sysfs();
        if (ret)
-               goto out;
+               return ret;
 
        nid = memory_add_physaddr_to_nid(phys_addr);
        ret = __add_memory(nid, phys_addr,
index 76c9969b7124c11cf1638a82cc223999ae5e1b8a..96a6dc9d305c88b842258f4ed91cc7b7b569d506 100644 (file)
@@ -1469,12 +1469,12 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
        if (IS_ERR(gpd_data))
                return PTR_ERR(gpd_data);
 
-       genpd_lock(genpd);
-
        ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
        if (ret)
                goto out;
 
+       genpd_lock(genpd);
+
        dev_pm_domain_set(dev, &genpd->domain);
 
        genpd->device_count++;
@@ -1482,9 +1482,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 
        list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
 
- out:
        genpd_unlock(genpd);
-
+ out:
        if (ret)
                genpd_free_dev_data(dev, gpd_data);
        else
@@ -1533,15 +1532,15 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
        genpd->device_count--;
        genpd->max_off_time_changed = true;
 
-       if (genpd->detach_dev)
-               genpd->detach_dev(genpd, dev);
-
        dev_pm_domain_set(dev, NULL);
 
        list_del_init(&pdd->list_node);
 
        genpd_unlock(genpd);
 
+       if (genpd->detach_dev)
+               genpd->detach_dev(genpd, dev);
+
        genpd_free_dev_data(dev, gpd_data);
 
        return 0;
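Both genpd hunks apply the same locking rule: the provider's attach_dev()/detach_dev() callbacks may sleep or take their own locks, so they now run outside genpd_lock(), which is held only around the domain's own bookkeeping. A reduced sketch of that rule (types and names hypothetical, not the genpd API):

    #include <linux/list.h>
    #include <linux/mutex.h>

    struct demo_dev {
    	struct list_head node;
    };

    struct demo_domain {
    	struct mutex lock;
    	struct list_head dev_list;
    	unsigned int device_count;
    	int (*attach_dev)(struct demo_domain *dom, struct demo_dev *dd);
    };

    static int demo_add_device(struct demo_domain *dom, struct demo_dev *dd)
    {
    	int ret;

    	/* Callback may sleep: invoke it before taking the domain lock. */
    	ret = dom->attach_dev ? dom->attach_dev(dom, dd) : 0;
    	if (ret)
    		return ret;

    	mutex_lock(&dom->lock);		/* covers bookkeeping only */
    	dom->device_count++;
    	list_add_tail(&dd->node, &dom->dev_list);
    	mutex_unlock(&dom->lock);

    	return 0;
    }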
index 1fad9291f6aaa6d893c45186a8a24e495f5169fc..7fc5a18e02ad5de5cf7269c79b33a7cecb3bfc21 100644 (file)
@@ -472,7 +472,7 @@ static int software_node_read_string_array(const struct fwnode_handle *fwnode,
                                                val, nval);
 }
 
-struct fwnode_handle *
+static struct fwnode_handle *
 software_node_get_parent(const struct fwnode_handle *fwnode)
 {
        struct software_node *swnode = to_software_node(fwnode);
@@ -481,7 +481,7 @@ software_node_get_parent(const struct fwnode_handle *fwnode)
                        NULL;
 }
 
-struct fwnode_handle *
+static struct fwnode_handle *
 software_node_get_next_child(const struct fwnode_handle *fwnode,
                             struct fwnode_handle *child)
 {
index 1e6edd568214f40400a5fc7eff474b316a4afda0..bf1c61cab8eb1cec135432fe52f1145653cfacd7 100644 (file)
@@ -656,7 +656,7 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
                        return -EBADF;
 
                l = f->f_mapping->host->i_bdev->bd_disk->private_data;
-               if (l->lo_state == Lo_unbound) {
+               if (l->lo_state != Lo_bound) {
                        return -EINVAL;
                }
                f = l->lo_backing_file;
index 417a9f15c11631cae518a9924c0e480fd6b85fd2..d7ac09c092f2ac8a5caf8632dca569b38b9472a6 100644 (file)
@@ -1748,6 +1748,11 @@ static int __init null_init(void)
                return -EINVAL;
        }
 
+       if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
+               pr_err("null_blk: invalid home_node value\n");
+               g_home_node = NUMA_NO_NODE;
+       }
+
        if (g_queue_mode == NULL_Q_RQ) {
                pr_err("null_blk: legacy IO path no longer available\n");
                return -EINVAL;
index 96670eefaeb2c3458964110a39bddd942ff9fde1..6d415b20fb70651c10aa87af6b7f18e53f0aaddd 100644 (file)
@@ -314,6 +314,7 @@ static void pcd_init_units(void)
                disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops,
                                                   1, BLK_MQ_F_SHOULD_MERGE);
                if (IS_ERR(disk->queue)) {
+                       put_disk(disk);
                        disk->queue = NULL;
                        continue;
                }
@@ -749,8 +750,14 @@ static int pcd_detect(void)
                return 0;
 
        printk("%s: No CD-ROM drive found\n", name);
-       for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
+       for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+               if (!cd->disk)
+                       continue;
+               blk_cleanup_queue(cd->disk->queue);
+               cd->disk->queue = NULL;
+               blk_mq_free_tag_set(&cd->tag_set);
                put_disk(cd->disk);
+       }
        pi_unregister_driver(par_drv);
        return -1;
 }
@@ -1006,8 +1013,14 @@ static int __init pcd_init(void)
        pcd_probe_capabilities();
 
        if (register_blkdev(major, name)) {
-               for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
+               for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+                       if (!cd->disk)
+                               continue;
+
+                       blk_cleanup_queue(cd->disk->queue);
+                       blk_mq_free_tag_set(&cd->tag_set);
                        put_disk(cd->disk);
+               }
                return -EBUSY;
        }
 
@@ -1028,6 +1041,9 @@ static void __exit pcd_exit(void)
        int unit;
 
        for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+               if (!cd->disk)
+                       continue;
+
                if (cd->present) {
                        del_gendisk(cd->disk);
                        pi_release(cd->pi);
index e92e7a8eeeb2bf066d522277ead805324aecde9e..35e6e271b219ccbe524e60499f78ff65fcb67994 100644 (file)
@@ -761,8 +761,14 @@ static int pf_detect(void)
                return 0;
 
        printk("%s: No ATAPI disk detected\n", name);
-       for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
+       for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+               if (!pf->disk)
+                       continue;
+               blk_cleanup_queue(pf->disk->queue);
+               pf->disk->queue = NULL;
+               blk_mq_free_tag_set(&pf->tag_set);
                put_disk(pf->disk);
+       }
        pi_unregister_driver(par_drv);
        return -1;
 }
@@ -1025,8 +1031,13 @@ static int __init pf_init(void)
        pf_busy = 0;
 
        if (register_blkdev(major, name)) {
-               for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
+               for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+                       if (!pf->disk)
+                               continue;
+                       blk_cleanup_queue(pf->disk->queue);
+                       blk_mq_free_tag_set(&pf->tag_set);
                        put_disk(pf->disk);
+               }
                return -EBUSY;
        }
 
@@ -1047,13 +1058,18 @@ static void __exit pf_exit(void)
        int unit;
        unregister_blkdev(major, name);
        for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
-               if (!pf->present)
+               if (!pf->disk)
                        continue;
-               del_gendisk(pf->disk);
+
+               if (pf->present)
+                       del_gendisk(pf->disk);
+
                blk_cleanup_queue(pf->disk->queue);
                blk_mq_free_tag_set(&pf->tag_set);
                put_disk(pf->disk);
-               pi_release(pf->pi);
+
+               if (pf->present)
+                       pi_release(pf->pi);
        }
 }
 
index 4ba967d65cf963c6f2a2084a62f2dbad1935c3bf..2210c1b9491ba2e9f690dd4a26b209b64f4ad925 100644 (file)
@@ -833,7 +833,7 @@ static int parse_rbd_opts_token(char *c, void *private)
                pctx->opts->queue_depth = intval;
                break;
        case Opt_alloc_size:
-               if (intval < 1) {
+               if (intval < SECTOR_SIZE) {
                        pr_err("alloc_size out of range\n");
                        return -EINVAL;
                }
@@ -924,23 +924,6 @@ static void rbd_put_client(struct rbd_client *rbdc)
                kref_put(&rbdc->kref, rbd_client_release);
 }
 
-static int wait_for_latest_osdmap(struct ceph_client *client)
-{
-       u64 newest_epoch;
-       int ret;
-
-       ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
-       if (ret)
-               return ret;
-
-       if (client->osdc.osdmap->epoch >= newest_epoch)
-               return 0;
-
-       ceph_osdc_maybe_request_map(&client->osdc);
-       return ceph_monc_wait_osdmap(&client->monc, newest_epoch,
-                                    client->options->mount_timeout);
-}
-
 /*
  * Get a ceph client with specific addr and configuration, if one does
  * not exist create it.  Either way, ceph_opts is consumed by this
@@ -960,7 +943,8 @@ static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
                 * Using an existing client.  Make sure ->pg_pools is up to
                 * date before we look up the pool id in do_rbd_add().
                 */
-               ret = wait_for_latest_osdmap(rbdc->client);
+               ret = ceph_wait_for_latest_osdmap(rbdc->client,
+                                       rbdc->client->options->mount_timeout);
                if (ret) {
                        rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
                        rbd_put_client(rbdc);
@@ -4203,12 +4187,12 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        q->limits.max_sectors = queue_max_hw_sectors(q);
        blk_queue_max_segments(q, USHRT_MAX);
        blk_queue_max_segment_size(q, UINT_MAX);
-       blk_queue_io_min(q, objset_bytes);
-       blk_queue_io_opt(q, objset_bytes);
+       blk_queue_io_min(q, rbd_dev->opts->alloc_size);
+       blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
 
        if (rbd_dev->opts->trim) {
                blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
-               q->limits.discard_granularity = objset_bytes;
+               q->limits.discard_granularity = rbd_dev->opts->alloc_size;
                blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
                blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
        }
index 4bc083b7c9b541a0fede52156bdae003c1d678df..2a7ca4a1e6f7bd5e2730b13a30760f9506c6973c 100644 (file)
@@ -513,6 +513,8 @@ static int init_vq(struct virtio_blk *vblk)
        if (err)
                num_vqs = 1;
 
+       num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
+
        vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
        if (!vblk->vqs)
                return -ENOMEM;
index 87ccef4bd69e904b1f19403e82403bd5cd13a277..32a21b8d1d85f430a031d342bff3a646c5ffa427 100644 (file)
@@ -1090,6 +1090,8 @@ static int ace_setup(struct ace_device *ace)
        return 0;
 
 err_read:
+       /* prevent double queue cleanup */
+       ace->gd->queue = NULL;
        put_disk(ace->gd);
 err_alloc_disk:
        blk_cleanup_queue(ace->queue);
index e7a5f1d1c3141acf9ef79c79fc5829886ef73245..d58a359a66225f39682c067739eb9843bae36b80 100644 (file)
@@ -290,18 +290,8 @@ static ssize_t idle_store(struct device *dev,
        struct zram *zram = dev_to_zram(dev);
        unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
        int index;
-       char mode_buf[8];
-       ssize_t sz;
 
-       sz = strscpy(mode_buf, buf, sizeof(mode_buf));
-       if (sz <= 0)
-               return -EINVAL;
-
-       /* ignore trailing new line */
-       if (mode_buf[sz - 1] == '\n')
-               mode_buf[sz - 1] = 0x00;
-
-       if (strcmp(mode_buf, "all"))
+       if (!sysfs_streq(buf, "all"))
                return -EINVAL;
 
        down_read(&zram->init_lock);
@@ -635,25 +625,15 @@ static ssize_t writeback_store(struct device *dev,
        struct bio bio;
        struct bio_vec bio_vec;
        struct page *page;
-       ssize_t ret, sz;
-       char mode_buf[8];
-       int mode = -1;
+       ssize_t ret;
+       int mode;
        unsigned long blk_idx = 0;
 
-       sz = strscpy(mode_buf, buf, sizeof(mode_buf));
-       if (sz <= 0)
-               return -EINVAL;
-
-       /* ignore trailing newline */
-       if (mode_buf[sz - 1] == '\n')
-               mode_buf[sz - 1] = 0x00;
-
-       if (!strcmp(mode_buf, "idle"))
+       if (sysfs_streq(buf, "idle"))
                mode = IDLE_WRITEBACK;
-       else if (!strcmp(mode_buf, "huge"))
+       else if (sysfs_streq(buf, "huge"))
                mode = HUGE_WRITEBACK;
-
-       if (mode == -1)
+       else
                return -EINVAL;
 
        down_read(&zram->init_lock);
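sysfs_streq() treats a single trailing newline in either argument as a terminator, which is exactly what the removed strscpy()-and-strip code did by hand. A minimal sketch of parsing echo'd sysfs input this way (helper name hypothetical; the mode constants are the zram ones used above):

    #include <linux/string.h>

    /* `echo idle > .../writeback` delivers "idle\n"; sysfs_streq()
     * matches it against "idle" without manual newline stripping.
     */
    static int demo_parse_mode(const char *buf)
    {
    	if (sysfs_streq(buf, "idle"))
    		return IDLE_WRITEBACK;
    	if (sysfs_streq(buf, "huge"))
    		return HUGE_WRITEBACK;
    	return -EINVAL;
    }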
@@ -794,18 +774,18 @@ struct zram_work {
        struct zram *zram;
        unsigned long entry;
        struct bio *bio;
+       struct bio_vec bvec;
 };
 
 #if PAGE_SIZE != 4096
 static void zram_sync_read(struct work_struct *work)
 {
-       struct bio_vec bvec;
        struct zram_work *zw = container_of(work, struct zram_work, work);
        struct zram *zram = zw->zram;
        unsigned long entry = zw->entry;
        struct bio *bio = zw->bio;
 
-       read_from_bdev_async(zram, &bvec, entry, bio);
+       read_from_bdev_async(zram, &zw->bvec, entry, bio);
 }
 
 /*
@@ -818,6 +798,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
 {
        struct zram_work work;
 
+       work.bvec = *bvec;
        work.zram = zram;
        work.entry = entry;
        work.bio = bio;
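The zram fix is about lifetime: the worker runs asynchronously on a kernel thread, so any data it needs must live in the queued work item itself, not in a separate on-stack variable of another frame. The on-stack-work shape, reduced (names hypothetical, and assuming the submitter waits for completion as read_from_bdev_sync() does):

    #include <linux/workqueue.h>

    struct demo_work {
    	struct work_struct work;
    	int payload;			/* captured by value, not by pointer */
    };

    static void demo_fn(struct work_struct *work)
    {
    	struct demo_work *dw = container_of(work, struct demo_work, work);

    	pr_info("payload=%d\n", dw->payload);
    }

    static void demo_submit_and_wait(int value)
    {
    	struct demo_work dw;

    	dw.payload = value;			/* fill before queuing */
    	INIT_WORK_ONSTACK(&dw.work, demo_fn);
    	queue_work(system_unbound_wq, &dw.work);
    	flush_work(&dw.work);			/* frame must outlive worker */
    	destroy_work_on_stack(&dw.work);
    }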
index ded198328f216066959825950ebfbe5aef29027d..7db48ae65cd2dc946b6a1757baa20993158b903e 100644 (file)
@@ -2942,6 +2942,7 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
                return 0;
        }
 
+       irq_set_status_flags(irq, IRQ_NOAUTOEN);
        ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler,
                               0, "OOB Wake-on-BT", data);
        if (ret) {
@@ -2956,7 +2957,6 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
        }
 
        data->oob_wake_irq = irq;
-       disable_irq(irq);
        bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq);
        return 0;
 }
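Requesting the IRQ with IRQ_NOAUTOEN set means it comes back masked from devm_request_irq(), removing the window in which the old request-then-disable_irq() sequence could let the handler fire early. The pattern as a sketch (function and name hypothetical):

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static int demo_request_masked_irq(struct device *dev, int irq,
    				   irq_handler_t handler, void *data)
    {
    	/* Stay disabled until the driver explicitly calls enable_irq(). */
    	irq_set_status_flags(irq, IRQ_NOAUTOEN);
    	return devm_request_irq(dev, irq, handler, 0, "demo-wake", data);
    }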
index 72866a004f075b79257c9d2df0c7b5b60852c31e..466ebd84ad1774096ecc45dd9f4ebe13ac785602 100644 (file)
@@ -348,7 +348,7 @@ config XILINX_HWICAP
 
 config R3964
        tristate "Siemens R3964 line discipline"
-       depends on TTY
+       depends on TTY && BROKEN
        ---help---
          This driver allows synchronous communication with devices using the
          Siemens R3964 packet protocol. Unless you are dealing with special
index ff0b199be4729757743bbd72bff2fc61a842d291..f2411468f33ff44707e45ab34cd359d2c3b5a0f0 100644 (file)
@@ -66,7 +66,6 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
                return;
        }
 
-       memset(&p, 0, sizeof(p));
        p.addr = base_addr;
        p.space = space;
        p.regspacing = offset;
index e8ba678347466db181a08768158e56930090aa7b..00bf4b17edbfafb5c9d25cb524f35e8d59c7a074 100644 (file)
@@ -214,6 +214,9 @@ struct ipmi_user {
 
        /* Does this interface receive IPMI events? */
        bool gets_events;
+
+       /* Free must run in process context for RCU cleanup. */
+       struct work_struct remove_work;
 };
 
 static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
@@ -1157,6 +1160,15 @@ static int intf_err_seq(struct ipmi_smi *intf,
        return rv;
 }
 
+static void free_user_work(struct work_struct *work)
+{
+       struct ipmi_user *user = container_of(work, struct ipmi_user,
+                                             remove_work);
+
+       cleanup_srcu_struct(&user->release_barrier);
+       kfree(user);
+}
+
 int ipmi_create_user(unsigned int          if_num,
                     const struct ipmi_user_hndl *handler,
                     void                  *handler_data,
@@ -1200,6 +1212,8 @@ int ipmi_create_user(unsigned int          if_num,
        goto out_kfree;
 
  found:
+       INIT_WORK(&new_user->remove_work, free_user_work);
+
        rv = init_srcu_struct(&new_user->release_barrier);
        if (rv)
                goto out_kfree;
@@ -1260,8 +1274,9 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
 static void free_user(struct kref *ref)
 {
        struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
-       cleanup_srcu_struct(&user->release_barrier);
-       kfree(user);
+
+       /* SRCU cleanup must happen in task context. */
+       schedule_work(&user->remove_work);
 }
 
 static void _ipmi_destroy_user(struct ipmi_user *user)
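cleanup_srcu_struct() may sleep, while a kref release can be invoked from atomic context, hence the detour through a workqueue above. The pattern in isolation (names hypothetical; INIT_WORK() is assumed to have run at object creation, as in ipmi_create_user()):

    #include <linux/kref.h>
    #include <linux/workqueue.h>

    struct demo_obj {
    	struct kref refcount;
    	struct work_struct remove_work;	/* INIT_WORK()ed at creation */
    };

    static void demo_free_work(struct work_struct *work)
    {
    	struct demo_obj *obj = container_of(work, struct demo_obj,
    					    remove_work);

    	/* Sleepable teardown (e.g. cleanup_srcu_struct()) is safe here. */
    	kfree(obj);
    }

    static void demo_release(struct kref *ref)
    {
    	struct demo_obj *obj = container_of(ref, struct demo_obj, refcount);

    	schedule_work(&obj->remove_work);	/* punt to process context */
    }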
index 01946cad3d1381ba7eed020544c775ed9a6e3f5f..682221eebd66101cb67b04e2ac979ab7caeae51c 100644 (file)
@@ -118,6 +118,8 @@ void __init ipmi_hardcode_init(void)
        char *str;
        char *si_type[SI_MAX_PARMS];
 
+       memset(si_type, 0, sizeof(si_type));
+
        /* Parse out the si_type string into its components. */
        str = si_type_str;
        if (*str != '\0') {
index d8b77133a83a2a2c59d3d7873db8d4d110f5dfb0..f824563fc28dd091303f89de651589ea5350a64b 100644 (file)
@@ -37,8 +37,8 @@
  *
  * Returns size of the event. If it is an invalid event, returns 0.
  */
-static int calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
-                               struct tcg_pcr_event *event_header)
+static size_t calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
+                                  struct tcg_pcr_event *event_header)
 {
        struct tcg_efi_specid_event_head *efispecid;
        struct tcg_event_field *event_field;
index 8856cce5a23b2858b58b69373f4cd89e2f898abb..817ae09a369ec2ba192a68f302205eaef7aadeb5 100644 (file)
@@ -233,12 +233,19 @@ __poll_t tpm_common_poll(struct file *file, poll_table *wait)
        __poll_t mask = 0;
 
        poll_wait(file, &priv->async_wait, wait);
+       mutex_lock(&priv->buffer_mutex);
 
-       if (!priv->response_read || priv->response_length)
+       /*
+        * The response_length indicates whether there is still a response
+        * (or part of one) to be consumed. Partial reads decrease it by
+        * the number of bytes read, and a write resets it to zero.
+        */
+       if (priv->response_length)
                mask = EPOLLIN | EPOLLRDNORM;
        else
                mask = EPOLLOUT | EPOLLWRNORM;
 
+       mutex_unlock(&priv->buffer_mutex);
        return mask;
 }
 
index 83ece5639f8639e7bb397ec4feeb121006073208..ae1030c9b086de511aa5c2bd0800fdcf6c2a1051 100644 (file)
@@ -402,15 +402,13 @@ int tpm_pm_suspend(struct device *dev)
        if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
                return 0;
 
-       if (chip->flags & TPM_CHIP_FLAG_TPM2) {
-               mutex_lock(&chip->tpm_mutex);
-               if (!tpm_chip_start(chip)) {
+       if (!tpm_chip_start(chip)) {
+               if (chip->flags & TPM_CHIP_FLAG_TPM2)
                        tpm2_shutdown(chip, TPM2_SU_STATE);
-                       tpm_chip_stop(chip);
-               }
-               mutex_unlock(&chip->tpm_mutex);
-       } else {
-               rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
+               else
+                       rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
+
+               tpm_chip_stop(chip);
        }
 
        return rc;
index 89d6f3736dbf605036e4eefb70efd2ef2ee4f386..f8edbb65eda3564cf99623c852fe474a93803123 100644 (file)
@@ -20,8 +20,7 @@
 #define PROG_ID_MAX            7
 
 #define PROG_STATUS_MASK(id)   (1 << ((id) + 8))
-#define PROG_PRES_MASK         0x7
-#define PROG_PRES(layout, pckr)        ((pckr >> layout->pres_shift) & PROG_PRES_MASK)
+#define PROG_PRES(layout, pckr)        ((pckr >> layout->pres_shift) & layout->pres_mask)
 #define PROG_MAX_RM9200_CSS    3
 
 struct clk_programmable {
@@ -37,20 +36,29 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
                                                  unsigned long parent_rate)
 {
        struct clk_programmable *prog = to_clk_programmable(hw);
+       const struct clk_programmable_layout *layout = prog->layout;
        unsigned int pckr;
+       unsigned long rate;
 
        regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr);
 
-       return parent_rate >> PROG_PRES(prog->layout, pckr);
+       if (layout->is_pres_direct)
+               rate = parent_rate / (PROG_PRES(layout, pckr) + 1);
+       else
+               rate = parent_rate >> PROG_PRES(layout, pckr);
+
+       return rate;
 }
 
 static int clk_programmable_determine_rate(struct clk_hw *hw,
                                           struct clk_rate_request *req)
 {
+       struct clk_programmable *prog = to_clk_programmable(hw);
+       const struct clk_programmable_layout *layout = prog->layout;
        struct clk_hw *parent;
        long best_rate = -EINVAL;
        unsigned long parent_rate;
-       unsigned long tmp_rate;
+       unsigned long tmp_rate = 0;
        int shift;
        int i;
 
@@ -60,10 +68,18 @@ static int clk_programmable_determine_rate(struct clk_hw *hw,
                        continue;
 
                parent_rate = clk_hw_get_rate(parent);
-               for (shift = 0; shift < PROG_PRES_MASK; shift++) {
-                       tmp_rate = parent_rate >> shift;
-                       if (tmp_rate <= req->rate)
-                               break;
+               if (layout->is_pres_direct) {
+                       for (shift = 0; shift <= layout->pres_mask; shift++) {
+                               tmp_rate = parent_rate / (shift + 1);
+                               if (tmp_rate <= req->rate)
+                                       break;
+                       }
+               } else {
+                       for (shift = 0; shift < layout->pres_mask; shift++) {
+                               tmp_rate = parent_rate >> shift;
+                               if (tmp_rate <= req->rate)
+                                       break;
+                       }
                }
 
                if (tmp_rate > req->rate)
@@ -137,16 +153,23 @@ static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate,
        if (!div)
                return -EINVAL;
 
-       shift = fls(div) - 1;
+       if (layout->is_pres_direct) {
+               shift = div - 1;
 
-       if (div != (1 << shift))
-               return -EINVAL;
+               if (shift > layout->pres_mask)
+                       return -EINVAL;
+       } else {
+               shift = fls(div) - 1;
 
-       if (shift >= PROG_PRES_MASK)
-               return -EINVAL;
+               if (div != (1 << shift))
+                       return -EINVAL;
+
+               if (shift >= layout->pres_mask)
+                       return -EINVAL;
+       }
 
        regmap_update_bits(prog->regmap, AT91_PMC_PCKR(prog->id),
-                          PROG_PRES_MASK << layout->pres_shift,
+                          layout->pres_mask << layout->pres_shift,
                           shift << layout->pres_shift);
 
        return 0;
@@ -202,19 +225,25 @@ at91_clk_register_programmable(struct regmap *regmap,
 }
 
 const struct clk_programmable_layout at91rm9200_programmable_layout = {
+       .pres_mask = 0x7,
        .pres_shift = 2,
        .css_mask = 0x3,
        .have_slck_mck = 0,
+       .is_pres_direct = 0,
 };
 
 const struct clk_programmable_layout at91sam9g45_programmable_layout = {
+       .pres_mask = 0x7,
        .pres_shift = 2,
        .css_mask = 0x3,
        .have_slck_mck = 1,
+       .is_pres_direct = 0,
 };
 
 const struct clk_programmable_layout at91sam9x5_programmable_layout = {
+       .pres_mask = 0x7,
        .pres_shift = 4,
        .css_mask = 0x7,
        .have_slck_mck = 0,
+       .is_pres_direct = 0,
 };
index 672a79bda88c960d7655a600834705152f5a4eaf..a0e5ce9c9b9ea6981948ed102be4f8a870e6a99c 100644 (file)
@@ -71,9 +71,11 @@ struct clk_pll_characteristics {
 };
 
 struct clk_programmable_layout {
+       u8 pres_mask;
        u8 pres_shift;
        u8 css_mask;
        u8 have_slck_mck;
+       u8 is_pres_direct;
 };
 
 extern const struct clk_programmable_layout at91rm9200_programmable_layout;
index 1f70cb164b06f310d867d54797dfd0d87be0a1d3..81943fac4537ef3c8e8d0f611a897b2dd6b8171f 100644 (file)
@@ -125,6 +125,14 @@ static const struct {
          .pll = true },
 };
 
+static const struct clk_programmable_layout sama5d2_programmable_layout = {
+       .pres_mask = 0xff,
+       .pres_shift = 4,
+       .css_mask = 0x7,
+       .have_slck_mck = 0,
+       .is_pres_direct = 1,
+};
+
 static void __init sama5d2_pmc_setup(struct device_node *np)
 {
        struct clk_range range = CLK_RANGE(0, 0);
@@ -249,7 +257,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
 
                hw = at91_clk_register_programmable(regmap, name,
                                                    parent_names, 6, i,
-                                                   &at91sam9x5_programmable_layout);
+                                                   &sama5d2_programmable_layout);
                if (IS_ERR(hw))
                        goto err_free;
        }
index 8c4435c53f09c255f83adb10426e254bc3cf485b..6e787cc9e5b90ded5c9fd9f613301c6c735133b3 100644 (file)
@@ -46,6 +46,8 @@ static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
        if (con_id)
                best_possible += 1;
 
+       lockdep_assert_held(&clocks_mutex);
+
        list_for_each_entry(p, &clocks, node) {
                match = 0;
                if (p->dev_id) {
@@ -402,7 +404,10 @@ void devm_clk_release_clkdev(struct device *dev, const char *con_id,
        struct clk_lookup *cl;
        int rval;
 
+       mutex_lock(&clocks_mutex);
        cl = clk_find(dev_id, con_id);
+       mutex_unlock(&clocks_mutex);
+
        WARN_ON(!cl);
        rval = devres_release(dev, devm_clkdev_release,
                              devm_clk_match_clkdev, cl);
index 1acfa3e3cfb401667fbb19666f1aac2add081826..113d71042199b3d3599da84df07e932b1c7902a9 100644 (file)
@@ -362,7 +362,7 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
 
        switch (pll_clk->type) {
        case PLL_1416X:
-               if (!pll->rate_table)
+               if (!pll_clk->rate_table)
                        init.ops = &clk_pll1416x_min_ops;
                else
                        init.ops = &clk_pll1416x_ops;
index 9628d4e7690bbdc632f2930bd6feb9653347595b..85daf826619ab4fe707b03211243279a06464625 100644 (file)
@@ -169,11 +169,10 @@ struct clk *mtk_clk_register_gate(
                return ERR_PTR(-ENOMEM);
 
        init.name = name;
-       init.flags = CLK_SET_RATE_PARENT;
+       init.flags = flags | CLK_SET_RATE_PARENT;
        init.parent_names = parent_name ? &parent_name : NULL;
        init.num_parents = parent_name ? 1 : 0;
        init.ops = ops;
-       init.flags = flags;
 
        cg->regmap = regmap;
        cg->set_ofs = set_ofs;
index 41e16dd7272a5943c842eda7064bd6c0394c3664..7a14ac9b2fecfece592807d72c38043bb84c12e4 100644 (file)
@@ -120,7 +120,7 @@ static bool meson_clk_pll_is_better(unsigned long rate,
                        return true;
        } else {
                /* Round down */
-               if (now < rate && best < now)
+               if (now <= rate && best < now)
                        return true;
        }
 
index 0e1ce8c03259b73221266de7aa6491be331815f3..f7b11e1eeebe894c26425067fb5cf14cab09a029 100644 (file)
@@ -960,14 +960,14 @@ static struct clk_regmap g12a_sd_emmc_c_clk0 = {
 /* VPU Clock */
 
 static const char * const g12a_vpu_parent_names[] = {
-       "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7",
+       "fclk_div3", "fclk_div4", "fclk_div5", "fclk_div7",
        "mpll1", "vid_pll", "hifi_pll", "gp0_pll",
 };
 
 static struct clk_regmap g12a_vpu_0_sel = {
        .data = &(struct clk_regmap_mux_data){
                .offset = HHI_VPU_CLK_CNTL,
-               .mask = 0x3,
+               .mask = 0x7,
                .shift = 9,
        },
        .hw.init = &(struct clk_init_data){
@@ -1011,7 +1011,7 @@ static struct clk_regmap g12a_vpu_0 = {
 static struct clk_regmap g12a_vpu_1_sel = {
        .data = &(struct clk_regmap_mux_data){
                .offset = HHI_VPU_CLK_CNTL,
-               .mask = 0x3,
+               .mask = 0x7,
                .shift = 25,
        },
        .hw.init = &(struct clk_init_data){
index 04df2e208ed6ec7a493f62debdbf9051e0528038..29ffb4fde7145adefe855c11283825e1e4ea1f11 100644 (file)
@@ -2216,6 +2216,7 @@ static struct clk_regmap gxbb_vdec_1_div = {
                .offset = HHI_VDEC_CLK_CNTL,
                .shift = 0,
                .width = 7,
+               .flags = CLK_DIVIDER_ROUND_CLOSEST,
        },
        .hw.init = &(struct clk_init_data){
                .name = "vdec_1_div",
@@ -2261,6 +2262,7 @@ static struct clk_regmap gxbb_vdec_hevc_div = {
                .offset = HHI_VDEC2_CLK_CNTL,
                .shift = 16,
                .width = 7,
+               .flags = CLK_DIVIDER_ROUND_CLOSEST,
        },
        .hw.init = &(struct clk_init_data){
                .name = "vdec_hevc_div",
index 08bcc01c0923863790d32dafbc1274bdf2358a28..daff235bc763348a03bebdcd9695cc8e08856744 100644 (file)
@@ -82,8 +82,8 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw,
        div = _get_table_val(meson_parm_read(clk->map, &pll_div->val),
                             meson_parm_read(clk->map, &pll_div->sel));
        if (!div || !div->divider) {
-               pr_info("%s: Invalid config value for vid_pll_div\n", __func__);
-               return parent_rate;
+               pr_debug("%s: Invalid config value for vid_pll_div\n", __func__);
+               return 0;
        }
 
        return DIV_ROUND_UP_ULL(parent_rate * div->multiplier, div->divider);
index 9b49adb20d07c68ef8ddd01f8d35e73ed746f64f..cbcdf664f33604c283a64c2e57dea8342537a675 100644 (file)
@@ -167,7 +167,7 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
                           unsigned long parent_rate)
 {
        struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
-       u32 n_mask, k_mask, m_mask, p_mask;
+       u32 n_mask = 0, k_mask = 0, m_mask = 0, p_mask = 0;
        struct _ccu_nkmp _nkmp;
        unsigned long flags;
        u32 reg;
@@ -186,10 +186,24 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
 
        ccu_nkmp_find_best(parent_rate, rate, &_nkmp);
 
-       n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1, nkmp->n.shift);
-       k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1, nkmp->k.shift);
-       m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1, nkmp->m.shift);
-       p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1, nkmp->p.shift);
+       /*
+        * If width is 0, the GENMASK() macro may not generate the expected
+        * mask (0): shifts equal to or greater than the width of the left
+        * operand are undefined behaviour in the C standard. This is easily
+        * avoided by explicitly checking whether width is 0.
+        */
+       if (nkmp->n.width)
+               n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1,
+                                nkmp->n.shift);
+       if (nkmp->k.width)
+               k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1,
+                                nkmp->k.shift);
+       if (nkmp->m.width)
+               m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1,
+                                nkmp->m.shift);
+       if (nkmp->p.width)
+               p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1,
+                                nkmp->p.shift);
 
        spin_lock_irqsave(nkmp->common.lock, flags);
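The same guard can be factored into a helper; a sketch under the assumption that a zero-width field should yield an all-zero mask (not the driver's actual API):

    #include <linux/bits.h>
    #include <linux/types.h>

    /* GENMASK(h, l) requires h >= l; width == 0 would ask for
     * GENMASK(shift - 1, shift), so special-case it to 0 instead.
     */
    static inline u32 demo_field_mask(u8 width, u8 shift)
    {
    	return width ? GENMASK(width + shift - 1, shift) : 0;
    }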
 
index d977193842dfed1fead553dd2240013c5a0d380a..19174835693b91cd473b59c7fed787f128f268d1 100644 (file)
@@ -165,7 +165,7 @@ static const struct clk_ops plt_clk_ops = {
 };
 
 static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
-                                       void __iomem *base,
+                                       const struct pmc_clk_data *pmc_data,
                                        const char **parent_names,
                                        int num_parents)
 {
@@ -184,9 +184,17 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
        init.num_parents = num_parents;
 
        pclk->hw.init = &init;
-       pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
+       pclk->reg = pmc_data->base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
        spin_lock_init(&pclk->lock);
 
+       /*
+        * On some systems, pmc_plt_clocks that the firmware has already
+        * enabled are marked as critical to keep the clock framework
+        * from gating them.
+        */
+       if (pmc_data->critical && plt_clk_is_enabled(&pclk->hw))
+               init.flags |= CLK_IS_CRITICAL;
+
        ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
        if (ret) {
                pclk = ERR_PTR(ret);
@@ -332,7 +340,7 @@ static int plt_clk_probe(struct platform_device *pdev)
                return PTR_ERR(parent_names);
 
        for (i = 0; i < PMC_CLK_NUM; i++) {
-               data->clks[i] = plt_clk_register(pdev, i, pmc_data->base,
+               data->clks[i] = plt_clk_register(pdev, i, pmc_data,
                                                 parent_names, data->nparents);
                if (IS_ERR(data->clks[i])) {
                        err = PTR_ERR(data->clks[i]);
index 171502a356aa1fb19bf285cdc5aade19ab861723..4b3d143f0f8a4445df12fcf589bb67fdfaa985df 100644 (file)
@@ -145,6 +145,7 @@ config VT8500_TIMER
 config NPCM7XX_TIMER
        bool "NPCM7xx timer driver" if COMPILE_TEST
        depends on HAS_IOMEM
+       select TIMER_OF
        select CLKSRC_MMIO
        help
          Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture,
index aa4ec53281cea585214c3a5f8b4faf941e7e7bd3..ea373cfbcecb5d8241f6a176a4a32a86a630c083 100644 (file)
@@ -9,7 +9,7 @@
  * published by the Free Software Foundation.
  */
 
-#define pr_fmt(fmt)    "arm_arch_timer: " fmt
+#define pr_fmt(fmt)    "arch_timer: " fmt
 
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -33,9 +33,6 @@
 
 #include <clocksource/arm_arch_timer.h>
 
-#undef pr_fmt
-#define pr_fmt(fmt) "arch_timer: " fmt
-
 #define CNTTIDR                0x08
 #define CNTTIDR_VIRT(n)        (BIT(1) << ((n) * 4))
 
index a8dd80576c95a0dd24c751cad4387a6754ad9de5..857f8c0862744268eed2b1eee8beae301bbe0bbd 100644 (file)
@@ -31,16 +31,9 @@ static u64 notrace clps711x_sched_clock_read(void)
        return ~readw(tcd);
 }
 
-static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base)
+static void __init clps711x_clksrc_init(struct clk *clock, void __iomem *base)
 {
-       unsigned long rate;
-
-       if (!base)
-               return -ENOMEM;
-       if (IS_ERR(clock))
-               return PTR_ERR(clock);
-
-       rate = clk_get_rate(clock);
+       unsigned long rate = clk_get_rate(clock);
 
        tcd = base;
 
@@ -48,8 +41,6 @@ static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base)
                              clocksource_mmio_readw_down);
 
        sched_clock_register(clps711x_sched_clock_read, 16, rate);
-
-       return 0;
 }
 
 static irqreturn_t clps711x_timer_interrupt(int irq, void *dev_id)
@@ -67,13 +58,6 @@ static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
        struct clock_event_device *clkevt;
        unsigned long rate;
 
-       if (!irq)
-               return -EINVAL;
-       if (!base)
-               return -ENOMEM;
-       if (IS_ERR(clock))
-               return PTR_ERR(clock);
-
        clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
        if (!clkevt)
                return -ENOMEM;
@@ -93,31 +77,29 @@ static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
                           "clps711x-timer", clkevt);
 }
 
-void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base,
-                                unsigned int irq)
-{
-       struct clk *tc1 = clk_get_sys("clps711x-timer.0", NULL);
-       struct clk *tc2 = clk_get_sys("clps711x-timer.1", NULL);
-
-       BUG_ON(_clps711x_clksrc_init(tc1, tc1_base));
-       BUG_ON(_clps711x_clkevt_init(tc2, tc2_base, irq));
-}
-
-#ifdef CONFIG_TIMER_OF
 static int __init clps711x_timer_init(struct device_node *np)
 {
        unsigned int irq = irq_of_parse_and_map(np, 0);
        struct clk *clock = of_clk_get(np, 0);
        void __iomem *base = of_iomap(np, 0);
 
+       if (!base)
+               return -ENOMEM;
+       if (!irq)
+               return -EINVAL;
+       if (IS_ERR(clock))
+               return PTR_ERR(clock);
+
        switch (of_alias_get_id(np, "timer")) {
        case CLPS711X_CLKSRC_CLOCKSOURCE:
-               return _clps711x_clksrc_init(clock, base);
+               clps711x_clksrc_init(clock, base);
+               break;
        case CLPS711X_CLKSRC_CLOCKEVENT:
                return _clps711x_clkevt_init(clock, base, irq);
        default:
                return -EINVAL;
        }
+
+       return 0;
 }
 TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init);
-#endif
index 54f8a331b53a0735e21e6befb4c894dae8f76f97..37671a5d4ed9fe1e59236a543b4c2b28be89f20e 100644 (file)
@@ -67,7 +67,7 @@ static irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-struct irqaction gic_compare_irqaction = {
+static struct irqaction gic_compare_irqaction = {
        .handler = gic_compare_interrupt,
        .percpu_dev_id = &gic_clockevent_device,
        .flags = IRQF_PERCPU | IRQF_TIMER,
index 43f4d5c4d6fa4fdb8f4581d71010e4b54ecd18da..f987027ca56645dab3a604e9e872676d7c3406c7 100644 (file)
@@ -71,7 +71,7 @@ static u64 tc_get_cycles32(struct clocksource *cs)
        return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
 }
 
-void tc_clksrc_suspend(struct clocksource *cs)
+static void tc_clksrc_suspend(struct clocksource *cs)
 {
        int i;
 
@@ -86,7 +86,7 @@ void tc_clksrc_suspend(struct clocksource *cs)
        bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
 }
 
-void tc_clksrc_resume(struct clocksource *cs)
+static void tc_clksrc_resume(struct clocksource *cs)
 {
        int i;
 
index eed6feff8b5f23673de989932afcd806e858ecfc..30c6f4ce672b3b1ac16645159398c66e3129aae9 100644 (file)
@@ -296,4 +296,4 @@ err_alloc:
 TIMER_OF_DECLARE(ox810se_rps,
                       "oxsemi,ox810se-rps-timer", oxnas_rps_timer_init);
 TIMER_OF_DECLARE(ox820_rps,
-                      "oxsemi,ox820se-rps-timer", oxnas_rps_timer_init);
+                      "oxsemi,ox820-rps-timer", oxnas_rps_timer_init);
index e8163693e936e92a54a5ed8a6145cbe4618f9cde..5e6038fbf115d10bc82cc77548b709c3145e4a43 100644 (file)
@@ -58,7 +58,7 @@ static u64 riscv_sched_clock(void)
 static DEFINE_PER_CPU(struct clocksource, riscv_clocksource) = {
        .name           = "riscv_clocksource",
        .rating         = 300,
-       .mask           = CLOCKSOURCE_MASK(BITS_PER_LONG),
+       .mask           = CLOCKSOURCE_MASK(64),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
        .read           = riscv_clocksource_rdtime,
 };
@@ -120,8 +120,7 @@ static int __init riscv_timer_init_dt(struct device_node *n)
                return error;
        }
 
-       sched_clock_register(riscv_sched_clock,
-                       BITS_PER_LONG, riscv_timebase);
+       sched_clock_register(riscv_sched_clock, 64, riscv_timebase);
 
        error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
                         "clockevents/riscv/timer:starting",
index c364027638e1aeccdb02c76963ad3212a13096bc..ee8ec5a8cb1668aa770fb0c99af9dd1dc89a3ad8 100644 (file)
@@ -585,34 +585,6 @@ static int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
        return 0;
 }
 
-/* Optimized set_load which removes costly spin wait in timer_start */
-int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload,
-                            unsigned int load)
-{
-       u32 l;
-
-       if (unlikely(!timer))
-               return -EINVAL;
-
-       omap_dm_timer_enable(timer);
-
-       l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
-       if (autoreload) {
-               l |= OMAP_TIMER_CTRL_AR;
-               omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
-       } else {
-               l &= ~OMAP_TIMER_CTRL_AR;
-       }
-       l |= OMAP_TIMER_CTRL_ST;
-
-       __omap_dm_timer_load_start(timer, l, load, timer->posted);
-
-       /* Save the context */
-       timer->context.tclr = l;
-       timer->context.tldr = load;
-       timer->context.tcrr = load;
-       return 0;
-}
 static int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
                                   unsigned int match)
 {
index e22f0dbaebb1d97e407d0be2b67da9b25f8af8dc..2986119dd31fb8391e3256a88942272870acad44 100644 (file)
@@ -385,7 +385,10 @@ static int intel_pstate_get_cppc_guranteed(int cpu)
        if (ret)
                return ret;
 
-       return cppc_perf.guaranteed_perf;
+       if (cppc_perf.guaranteed_perf)
+               return cppc_perf.guaranteed_perf;
+
+       return cppc_perf.nominal_perf;
 }
 
 #else /* CONFIG_ACPI_CPPC_LIB */
@@ -2593,6 +2596,9 @@ static int __init intel_pstate_init(void)
        const struct x86_cpu_id *id;
        int rc;
 
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return -ENODEV;
+
        if (no_load)
                return -ENODEV;
 
@@ -2608,7 +2614,7 @@ static int __init intel_pstate_init(void)
        } else {
                id = x86_match_cpu(intel_pstate_cpu_ids);
                if (!id) {
-                       pr_info("CPU ID not supported\n");
+                       pr_info("CPU model not supported\n");
                        return -ENODEV;
                }
 
index 3f49427766b8810cb361ce2d8a1b8331541da5c0..2b51e0718c9f6e493b8659a2ad3dd24de8eaab0d 100644 (file)
@@ -189,8 +189,8 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
 
        clk_put(priv->clk);
        dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
-       kfree(priv);
        dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+       kfree(priv);
 
        return 0;
 }
index b1eadc6652b5f236897811357bd49ac364498f5e..7205d9f4029e11adb7f49d0e57bfdbf11b069fc0 100644 (file)
@@ -865,19 +865,18 @@ static int ahash_update_ctx(struct ahash_request *req)
                if (ret)
                        goto unmap_ctx;
 
-               if (mapped_nents) {
+               if (mapped_nents)
                        sg_to_sec4_sg_last(req->src, mapped_nents,
                                           edesc->sec4_sg + sec4_sg_src_index,
                                           0);
-                       if (*next_buflen)
-                               scatterwalk_map_and_copy(next_buf, req->src,
-                                                        to_hash - *buflen,
-                                                        *next_buflen, 0);
-               } else {
+               else
                        sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
                                            1);
-               }
 
+               if (*next_buflen)
+                       scatterwalk_map_and_copy(next_buf, req->src,
+                                                to_hash - *buflen,
+                                                *next_buflen, 0);
                desc = edesc->hw_desc;
 
                edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
index ec8a291d62bab3c58f584a61699d3cdc0118a689..54093ffd0aefa8a7c3a40f887958c61b2222d03c 100644 (file)
@@ -671,7 +671,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg(
        d = bcm2835_dma_create_cb_chain(chan, direction, false,
                                        info, extra,
                                        frames, src, dst, 0, 0,
-                                       GFP_KERNEL);
+                                       GFP_NOWAIT);
        if (!d)
                return NULL;
 
index 131f3974740d5d75a67141f15b019b06d7011c8a..814853842e29f9e103beab75468de580ac34bca7 100644 (file)
@@ -253,7 +253,7 @@ static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
        mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT);
 #else
-       mtk_dma_set(pc, MTK_CQDMA_SRC2, 0);
+       mtk_dma_set(pc, MTK_CQDMA_DST2, 0);
 #endif
 
        /* setup the length */
index 2b4f2569816956621e1d6201851ae16d21db27f3..e2a5398f89b51129345fbb076710083ef24f9188 100644 (file)
@@ -1282,6 +1282,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
        enum dma_status status;
        unsigned int residue = 0;
        unsigned int dptr = 0;
+       unsigned int chcrb;
+       unsigned int tcrb;
+       unsigned int i;
 
        if (!desc)
                return 0;
@@ -1329,6 +1332,24 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
                return 0;
        }
 
+       /*
+        * We need to read two registers.
+        * Make sure the control register does not skip to the next chunk
+        * while reading the counter.
+        * Trying it 3 times should be enough: initial read, retry, retry
+        * for the paranoid.
+        */
+       for (i = 0; i < 3; i++) {
+               chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+                                           RCAR_DMACHCRB_DPTR_MASK;
+               tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB);
+               /* Still the same? */
+               if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+                             RCAR_DMACHCRB_DPTR_MASK))
+                       break;
+       }
+       WARN_ONCE(i >= 3, "residue might not be continuous!");
+
        /*
         * In descriptor mode the descriptor running pointer is not maintained
         * by the interrupt handler, find the running descriptor from the
@@ -1336,8 +1357,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
         * mode just use the running descriptor pointer.
         */
        if (desc->hwdescs.use) {
-               dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
-                       RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+               dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT;
                if (dptr == 0)
                        dptr = desc->nchunks;
                dptr--;
@@ -1355,7 +1375,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
        }
 
        /* Add the residue for the current chunk. */
-       residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
+       residue += tcrb << desc->xfer_shift;
 
        return residue;
 }
@@ -1368,6 +1388,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
        enum dma_status status;
        unsigned long flags;
        unsigned int residue;
+       bool cyclic;
 
        status = dma_cookie_status(chan, cookie, txstate);
        if (status == DMA_COMPLETE || !txstate)
@@ -1375,10 +1396,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
 
        spin_lock_irqsave(&rchan->lock, flags);
        residue = rcar_dmac_chan_get_residue(rchan, cookie);
+       cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
        spin_unlock_irqrestore(&rchan->lock, flags);
 
        /* if there's no residue, the cookie is complete */
-       if (!residue)
+       if (!residue && !cyclic)
                return DMA_COMPLETE;
 
        dma_set_residue(txstate, residue);
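The residue fix is a read-twice consistency loop: snapshot register A, read register B, then re-read A and retry if it moved while B was being read. In isolation (register offsets and accessor hypothetical):

    #include <linux/bits.h>
    #include <linux/bug.h>

    #define DEMO_CTRL	0x1c
    #define DEMO_COUNT	0x20
    #define DEMO_PTR_MASK	GENMASK(23, 16)

    u32 demo_read(u32 reg);	/* hypothetical MMIO accessor */

    static void demo_read_pair(u32 *ptr, u32 *count)
    {
    	unsigned int i;

    	for (i = 0; i < 3; i++) {
    		*ptr = demo_read(DEMO_CTRL) & DEMO_PTR_MASK;
    		*count = demo_read(DEMO_COUNT);
    		/* Retry if the pointer moved while reading the counter. */
    		if (*ptr == (demo_read(DEMO_CTRL) & DEMO_PTR_MASK))
    			return;
    	}
    	WARN_ONCE(1, "register pair kept changing; snapshot may be stale");
    }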
index 4e0eede599a8dedd8c306a65c12366a5c857a4ae..ac0301b695937c1168cac2055a5af41d47536379 100644 (file)
@@ -1578,11 +1578,9 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 
        dmadev->nr_channels = nr_channels;
        dmadev->nr_requests = nr_requests;
-       ret = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
+       device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
                                       dmadev->ahb_addr_masks,
                                       count);
-       if (ret)
-               return ret;
        dmadev->nr_ahb_addr_masks = count;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index 8e17149655f069ff923e09c943295b469d87d076..540e8cd16ee6ec6ae0205a2f8c6193f129e80825 100644 (file)
@@ -116,7 +116,7 @@ config EXTCON_PALMAS
 
 config EXTCON_PTN5150
        tristate "NXP PTN5150 CC LOGIC USB EXTCON support"
-       depends on I2C && GPIOLIB || COMPILE_TEST
+       depends on I2C && (GPIOLIB || COMPILE_TEST)
        select REGMAP_I2C
        help
          Say Y here to enable support for USB peripheral and USB host
index 099d83e4e910e25146f5a22e67e854bd003cd687..fae2d5c433145363f9a75d953ae6f87a78fdf729 100644 (file)
@@ -416,11 +416,8 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
        nr++;
 }
 
-void __init dmi_memdev_walk(void)
+static void __init dmi_memdev_walk(void)
 {
-       if (!dmi_available)
-               return;
-
        if (dmi_walk_early(count_mem_devices) == 0 && dmi_memdev_nr) {
                dmi_memdev = dmi_alloc(sizeof(*dmi_memdev) * dmi_memdev_nr);
                if (dmi_memdev)
@@ -614,7 +611,7 @@ static int __init dmi_smbios3_present(const u8 *buf)
        return 1;
 }
 
-void __init dmi_scan_machine(void)
+static void __init dmi_scan_machine(void)
 {
        char __iomem *p, *q;
        char buf[32];
@@ -769,15 +766,20 @@ static int __init dmi_init(void)
 subsys_initcall(dmi_init);
 
 /**
- * dmi_set_dump_stack_arch_desc - set arch description for dump_stack()
+ *     dmi_setup - scan and set up DMI system information
  *
- * Invoke dump_stack_set_arch_desc() with DMI system information so that
- * DMI identifiers are printed out on task dumps.  Arch boot code should
- * call this function after dmi_scan_machine() if it wants to print out DMI
- * identifiers on task dumps.
+ *     Scan the DMI system information. This sets up the DMI identifiers
+ *     (dmi_system_id) for printing them out on task dumps and prepares
+ *     DIMM entry information (dmi_memdev_info) from the SMBIOS table
+ *     for use when reporting memory errors.
  */
-void __init dmi_set_dump_stack_arch_desc(void)
+void __init dmi_setup(void)
 {
+       dmi_scan_machine();
+       if (!dmi_available)
+               return;
+
+       dmi_memdev_walk();
        dump_stack_set_arch_desc("%s", dmi_ids_string);
 }
 
@@ -841,7 +843,7 @@ static bool dmi_is_end_of_table(const struct dmi_system_id *dmi)
  *     returns non zero or we hit the end. Callback function is called for
  *     each successful match. Returns the number of matches.
  *
- *     dmi_scan_machine must be called before this function is called.
+ *     dmi_setup must be called before this function is called.
  */
 int dmi_check_system(const struct dmi_system_id *list)
 {
@@ -871,7 +873,7 @@ EXPORT_SYMBOL(dmi_check_system);
  *     Walk the blacklist table until the first match is found.  Return the
  *     pointer to the matching entry or NULL if there's no match.
  *
- *     dmi_scan_machine must be called before this function is called.
+ *     dmi_setup must be called before this function is called.
  */
 const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list)
 {
index 0c1af675c3385fabe37a8ff44ba85a11af365b0a..e2ac5fa5531b9f4ce39e96d0953bb82251a49cbe 100644 (file)
@@ -162,13 +162,11 @@ void efi_virtmap_unload(void)
 static int __init arm_dmi_init(void)
 {
        /*
-        * On arm64/ARM, DMI depends on UEFI, and dmi_scan_machine() needs to
+        * On arm64/ARM, DMI depends on UEFI, and dmi_setup() needs to
         * be called early because dmi_id_init(), which is an arch_initcall
         * itself, depends on dmi_scan_machine() having been called already.
         */
-       dmi_scan_machine();
-       if (dmi_available)
-               dmi_set_dump_stack_arch_desc();
+       dmi_setup();
        return 0;
 }
 core_initcall(arm_dmi_init);
index b0103e16fc1b9d0b8958bd8029f96a97035bef20..b1f7b64652dbb3da8d416432901ba576fc689558 100644 (file)
@@ -71,7 +71,6 @@ CFLAGS_arm64-stub.o           := -DTEXT_OFFSET=$(TEXT_OFFSET)
 extra-$(CONFIG_EFI_ARMSTUB)    := $(lib-y)
 lib-$(CONFIG_EFI_ARMSTUB)      := $(patsubst %.o,%.stub.o,$(lib-y))
 
-STUBCOPY_RM-y                  := -R *ksymtab* -R *kcrctab*
 STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \
                                   --prefix-symbols=__efistub_
 STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
@@ -86,12 +85,13 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE
 # this time, use objcopy and leave all sections in place.
 #
 quiet_cmd_stubcopy = STUBCPY $@
-      cmd_stubcopy = if $(STRIP) --strip-debug $(STUBCOPY_RM-y) -o $@ $<; \
-                    then if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); \
-                    then (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
-                          rm -f $@; /bin/false);                         \
-                    else $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; fi        \
-                    else /bin/false; fi
+      cmd_stubcopy =                                                   \
+       $(STRIP) --strip-debug -o $@ $<;                                \
+       if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); then            \
+               echo "$@: absolute symbol references not allowed in the EFI stub" >&2; \
+               /bin/false;                                             \
+       fi;                                                             \
+       $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@
 
 #
 # ARM discards the .data section because it disallows r/w data in the
index 91b90c0cea731778bd524a13d801433e7df0a2ab..12acdac858208979438491e90c3782b064f9e952 100644 (file)
@@ -132,8 +132,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
        if (err < 0)
                goto out;
 
-       if (err & BIT(pos))
-               err = -EACCES;
+       if (value & BIT(pos)) {
+               err = -EPERM;
+               goto out;
+       }
 
        err = 0;
 
index 854bce4fb9e7209b2697e9c662e65e4456c969c3..217507002dbc38ce7a34c64df751ac5887d304d6 100644 (file)
@@ -1224,6 +1224,8 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
 
        gpio->offset_timer =
                devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL);
+       if (!gpio->offset_timer)
+               return -ENOMEM;
 
        return aspeed_gpio_setup_irqs(gpio, pdev);
 }
index f0223cee97744825ee508e5c7ffe6057829359bf..77092268ee955fe280926f8426bd4613a949595f 100644 (file)
@@ -414,6 +414,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
                        irq_set_handler_locked(data, handle_edge_irq);
                        break;
                case IRQ_TYPE_EDGE_BOTH:
+                       sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1);
                        irq_set_handler_locked(data, handle_edge_irq);
                        break;
index 0ecd2369c2cad0daa5e08696ab85b91af5235a26..a09d2f9ebacc8d4909d79119333e344453ea6e0a 100644 (file)
@@ -148,6 +148,8 @@ static int gpio_exar_probe(struct platform_device *pdev)
        mutex_init(&exar_gpio->lock);
 
        index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
+       if (index < 0)
+               goto err_destroy;
 
        sprintf(exar_gpio->name, "exar_gpio%d", index);
        exar_gpio->gpio_chip.label = exar_gpio->name;
index 154d959e899323dcea54b2829c6a70a4127bf579..b6a4efce7c9285f0a26411246d615c90f498d0be 100644 (file)
@@ -204,8 +204,8 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file,
        struct gpio_mockup_chip *chip;
        struct seq_file *sfile;
        struct gpio_chip *gc;
+       int val, cnt;
        char buf[3];
-       int val, rv;
 
        if (*ppos != 0)
                return 0;
@@ -216,13 +216,9 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file,
        gc = &chip->gc;
 
        val = gpio_mockup_get(gc, priv->offset);
-       snprintf(buf, sizeof(buf), "%d\n", val);
+       cnt = snprintf(buf, sizeof(buf), "%d\n", val);
 
-       rv = copy_to_user(usr_buf, buf, sizeof(buf));
-       if (rv)
-               return rv;
-
-       return sizeof(buf) - 1;
+       return simple_read_from_buffer(usr_buf, size, ppos, buf, cnt);
 }
 
 static ssize_t gpio_mockup_debugfs_write(struct file *file,
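simple_read_from_buffer() does the *ppos bookkeeping and partial-copy handling that the open-coded version got wrong: copy_to_user() returns the number of bytes it failed to copy, not an errno, so returning it directly was a bug. A generic sketch of the helper in a debugfs read handler (names hypothetical):

static ssize_t example_read(struct file *file, char __user *ubuf,
                            size_t count, loff_t *ppos)
{
        char buf[16];
        int len = scnprintf(buf, sizeof(buf), "%d\n", 42);

        /* copies at most count bytes, advances *ppos, returns bytes copied */
        return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}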
index 8b9c3ab70f6eade458a501184ce764454819043d..6a3ec575a404ed9fa3dfcf8c78e56381789a8c02 100644 (file)
@@ -120,7 +120,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
         * to determine if the flags should have inverted semantics.
         */
        if (IS_ENABLED(CONFIG_SPI_MASTER) &&
-           of_property_read_bool(np, "cs-gpios")) {
+           of_property_read_bool(np, "cs-gpios") &&
+           !strcmp(propname, "cs-gpios")) {
                struct device_node *child;
                u32 cs;
                int ret;
@@ -142,16 +143,16 @@ static void of_gpio_flags_quirks(struct device_node *np,
                                 * conflict and the "spi-cs-high" flag will
                                 * take precedence.
                                 */
-                               if (of_property_read_bool(np, "spi-cs-high")) {
+                               if (of_property_read_bool(child, "spi-cs-high")) {
                                        if (*flags & OF_GPIO_ACTIVE_LOW) {
                                                pr_warn("%s GPIO handle specifies active low - ignored\n",
-                                                       of_node_full_name(np));
+                                                       of_node_full_name(child));
                                                *flags &= ~OF_GPIO_ACTIVE_LOW;
                                        }
                                } else {
                                        if (!(*flags & OF_GPIO_ACTIVE_LOW))
                                                pr_info("%s enforce active low on chipselect handle\n",
-                                                       of_node_full_name(np));
+                                                       of_node_full_name(child));
                                        *flags |= OF_GPIO_ACTIVE_LOW;
                                }
                                break;
@@ -717,7 +718,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
 
        of_node_get(chip->of_node);
 
-       return of_gpiochip_scan_gpios(chip);
+       status = of_gpiochip_scan_gpios(chip);
+       if (status) {
+               of_node_put(chip->of_node);
+               gpiochip_remove_pin_ranges(chip);
+       }
+
+       return status;
 }
 
 void of_gpiochip_remove(struct gpio_chip *chip)
index 144af07335815998c7238b72e01882cf26692ef8..bca3e7740ef66c8fac2b8f89866935e3655fc9c3 100644 (file)
@@ -1379,7 +1379,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 
        status = gpiochip_add_irqchip(chip, lock_key, request_key);
        if (status)
-               goto err_remove_chip;
+               goto err_free_gpiochip_mask;
 
        status = of_gpiochip_add(chip);
        if (status)
@@ -1387,7 +1387,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 
        status = gpiochip_init_valid_mask(chip);
        if (status)
-               goto err_remove_chip;
+               goto err_remove_of_chip;
 
        for (i = 0; i < chip->ngpio; i++) {
                struct gpio_desc *desc = &gdev->descs[i];
@@ -1415,14 +1415,18 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
        if (gpiolib_initialized) {
                status = gpiochip_setup_dev(gdev);
                if (status)
-                       goto err_remove_chip;
+                       goto err_remove_acpi_chip;
        }
        return 0;
 
-err_remove_chip:
+err_remove_acpi_chip:
        acpi_gpiochip_remove(chip);
+err_remove_of_chip:
        gpiochip_free_hogs(chip);
        of_gpiochip_remove(chip);
+err_remove_chip:
+       gpiochip_irqchip_remove(chip);
+err_free_gpiochip_mask:
        gpiochip_free_valid_mask(chip);
 err_remove_irqchip_mask:
        gpiochip_irqchip_free_valid_mask(chip);
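The relabeled error path restores the usual kernel unwind convention: each label undoes only the setup steps that completed before the jump, in reverse order, so an early failure never tears down state that was never created. A reduced sketch of the pattern (setup/teardown names hypothetical):

static int example_init(void)
{
        int err;

        err = setup_a();
        if (err)
                return err;

        err = setup_b();
        if (err)
                goto err_undo_a;

        err = setup_c();
        if (err)
                goto err_undo_b;

        return 0;

err_undo_b:
        teardown_b();
err_undo_a:
        teardown_a();
        return err;
}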
@@ -2776,7 +2780,7 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
        }
 
        config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce);
-       return gpio_set_config(chip, gpio_chip_hwgpio(desc), config);
+       return chip->set_config(chip, gpio_chip_hwgpio(desc), config);
 }
 EXPORT_SYMBOL_GPL(gpiod_set_debounce);
 
@@ -2813,7 +2817,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
        packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
                                          !transitory);
        gpio = gpio_chip_hwgpio(desc);
-       rc = gpio_set_config(chip, gpio, packed);
+       rc = chip->set_config(chip, gpio, packed);
        if (rc == -ENOTSUPP) {
                dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
                                gpio);
index 4f8fb4ecde3419fe8449ddfcea859f17242e6919..79fb302fb9543f93cfb9738700f53e34006e869c 100644 (file)
@@ -3165,6 +3165,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 
                /* No need to recover an evicted BO */
                if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
+                   shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
                    shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
                        continue;
 
@@ -3173,11 +3174,16 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
                        break;
 
                if (fence) {
-                       r = dma_fence_wait_timeout(fence, false, tmo);
+                       tmo = dma_fence_wait_timeout(fence, false, tmo);
                        dma_fence_put(fence);
                        fence = next;
-                       if (r <= 0)
+                       if (tmo == 0) {
+                               r = -ETIMEDOUT;
                                break;
+                       } else if (tmo < 0) {
+                               r = tmo;
+                               break;
+                       }
                } else {
                        fence = next;
                }
@@ -3188,8 +3194,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
                tmo = dma_fence_wait_timeout(fence, false, tmo);
        dma_fence_put(fence);
 
-       if (r <= 0 || tmo <= 0) {
-               DRM_ERROR("recover vram bo from shadow failed\n");
+       if (r < 0 || tmo <= 0) {
+               DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
                return -EIO;
        }
 
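The fix follows dma_fence_wait_timeout()'s return convention: remaining jiffies on success, 0 on timeout, negative errno on error. Storing the result in r, as the old code did, conflated leftover jiffies with error codes and also discarded the shrinking timeout between iterations. A sketch of the convention:

long tmo = dma_fence_wait_timeout(fence, false, timeout);
if (tmo == 0)
        r = -ETIMEDOUT; /* the fence never signalled */
else if (tmo < 0)
        r = tmo;        /* interrupted or another error */
else
        r = 0;          /* signalled, tmo jiffies were left over */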
@@ -3625,6 +3631,7 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
        struct pci_dev *pdev = adev->pdev;
        enum pci_bus_speed cur_speed;
        enum pcie_link_width cur_width;
+       u32 ret = 1;
 
        *speed = PCI_SPEED_UNKNOWN;
        *width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -3632,6 +3639,10 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
        while (pdev) {
                cur_speed = pcie_get_speed_cap(pdev);
                cur_width = pcie_get_width_cap(pdev);
+               ret = pcie_bandwidth_available(adev->pdev, NULL,
+                                                      NULL, &cur_width);
+               if (!ret)
+                       cur_width = PCIE_LNK_WIDTH_RESRV;
 
                if (cur_speed != PCI_SPEED_UNKNOWN) {
                        if (*speed == PCI_SPEED_UNKNOWN)
index 0b8ef2d27d6b2b8e60e0959f0cb8e742e4de3c7f..fe393a46f8811dc452dc3db6d062d0aa850e6b47 100644 (file)
@@ -35,6 +35,7 @@
 #include "amdgpu_trace.h"
 
 #define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000)
+#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT        msecs_to_jiffies(2000)
 
 /*
  * IB
@@ -344,6 +345,8 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
                 * cost waiting for it coming back under RUNTIME only
                */
                tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
+       } else if (adev->gmc.xgmi.hive_id) {
+               tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
        }
 
        for (i = 0; i < adev->num_rings; ++i) {
index bfa9062ce6b9fed957a5e52c592dc57ff257a02e..16fcb56c232b55eef2e36027b624ddb6986aa68d 100644 (file)
@@ -700,6 +700,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        struct amdgpu_vm_bo_base *bo_base, *tmp;
        int r = 0;
 
+       vm->bulk_moveable &= list_empty(&vm->evicted);
+
        list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
                struct amdgpu_bo *bo = bo_base->bo;
 
index d0309e8c9d12cdafa95d2a23e84018f4bb6b8035..a11db2b1a63f41e16acd4df34a24b2f3e6db9140 100644 (file)
@@ -2405,8 +2405,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
        /* disable CG */
        WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
 
-       adev->gfx.rlc.funcs->reset(adev);
-
        gfx_v9_0_init_pg(adev);
 
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
index 600259b4e29184a5ce05f3441151a7a2f2bf7223..2fe8397241ea4c128ed7fffc924955ac483daec8 100644 (file)
@@ -742,7 +742,7 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
                }
 
                ring->vm_inv_eng = inv_eng - 1;
-               change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub]));
+               vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
 
                dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
                         ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
index d0d966d6080a6dda87d57d2d8ee1ed2b58a1444a..1696644ec022391d24b93df9f1dacd23079bd72e 100644 (file)
@@ -182,6 +182,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
                tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
                                    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
        }
+       WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
 
        tmp = mmVM_L2_CNTL4_DEFAULT;
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
index 8be9677c0c07dae65e3c69dafab241e3b16ff975..cf9a49f49d3a41a99bb96dc75c38f24e72579367 100644 (file)
@@ -320,6 +320,7 @@ static const struct kfd_deviceid supported_devices[] = {
        { 0x9876, &carrizo_device_info },       /* Carrizo */
        { 0x9877, &carrizo_device_info },       /* Carrizo */
        { 0x15DD, &raven_device_info },         /* Raven */
+       { 0x15D8, &raven_device_info },         /* Raven */
 #endif
        { 0x67A0, &hawaii_device_info },        /* Hawaii */
        { 0x67A1, &hawaii_device_info },        /* Hawaii */
index fb27783d7a542d565e1c002d03fc051d055039be..3082b55b1e774fd31b4293c402c41174df28e9a9 100644 (file)
@@ -4533,6 +4533,7 @@ static void handle_cursor_update(struct drm_plane *plane,
        amdgpu_crtc->cursor_width = plane->state->crtc_w;
        amdgpu_crtc->cursor_height = plane->state->crtc_h;
 
+       memset(&attributes, 0, sizeof(attributes));
        attributes.address.high_part = upper_32_bits(address);
        attributes.address.low_part  = lower_32_bits(address);
        attributes.width             = plane->state->crtc_w;
@@ -5429,9 +5430,11 @@ static void get_freesync_config_for_crtc(
        struct amdgpu_dm_connector *aconnector =
                        to_amdgpu_dm_connector(new_con_state->base.connector);
        struct drm_display_mode *mode = &new_crtc_state->base.mode;
+       int vrefresh = drm_mode_vrefresh(mode);
 
        new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
-               aconnector->min_vfreq <= drm_mode_vrefresh(mode);
+                                       vrefresh >= aconnector->min_vfreq &&
+                                       vrefresh <= aconnector->max_vfreq;
 
        if (new_crtc_state->vrr_supported) {
                new_crtc_state->stream->ignore_msa_timing_param = true;
index c68fbd55db3ca6f01c49b86e60a584dfe8d90ff4..a6cda201c964c5bc918e8d693c2aa2fccf65eb58 100644 (file)
@@ -1377,6 +1377,11 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
                return UPDATE_TYPE_FULL;
        }
 
+       if (u->surface->force_full_update) {
+               update_flags->bits.full_update = 1;
+               return UPDATE_TYPE_FULL;
+       }
+
        type = get_plane_info_update_type(u);
        elevate_update_type(&overall_type, type);
 
@@ -1802,6 +1807,14 @@ void dc_commit_updates_for_stream(struct dc *dc,
                }
 
                dc_resource_state_copy_construct(state, context);
+
+               for (i = 0; i < dc->res_pool->pipe_count; i++) {
+                       struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+                       struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+                       if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
+                               new_pipe->plane_state->force_full_update = true;
+               }
        }
 
 
@@ -1838,6 +1851,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
                dc->current_state = context;
                dc_release_state(old);
 
+               for (i = 0; i < dc->res_pool->pipe_count; i++) {
+                       struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+                       if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
+                               pipe_ctx->plane_state->force_full_update = false;
+               }
        }
        /*let's use current_state to update watermark etc*/
        if (update_type >= UPDATE_TYPE_FULL)
index 4eba3c4800b63bef00ec9fd532919aa84ca72126..ea18e9c2d8cea5c65582274a297d67b1d0fbb82d 100644 (file)
@@ -2660,12 +2660,18 @@ void core_link_enable_stream(
 void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
 {
        struct dc  *core_dc = pipe_ctx->stream->ctx->dc;
+       struct dc_stream_state *stream = pipe_ctx->stream;
 
        core_dc->hwss.blank_stream(pipe_ctx);
 
        if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                deallocate_mst_payload(pipe_ctx);
 
+       if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+               dal_ddc_service_write_scdc_data(
+                       stream->link->ddc, 0,
+                       stream->timing.flags.LTE_340MCSC_SCRAMBLE);
+
        core_dc->hwss.disable_stream(pipe_ctx, option);
 
        disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
index 1a7fd6aa77ebb213168cd477452ac1dd94d5aadc..0515095574e735e0535ee17ce3369168557c201e 100644 (file)
@@ -503,6 +503,9 @@ struct dc_plane_state {
        struct dc_plane_status status;
        struct dc_context *ctx;
 
+       /* HACK: Workaround for forcing full reprogramming under some conditions */
+       bool force_full_update;
+
        /* private to dc_surface.c */
        enum dc_irq_source irq_source;
        struct kref refcount;
index 4febf4ef7240e6aef2610063b55f3aee636bbca0..4fe3664fb49508e7f9c07ddc69f5b610fd884d1d 100644 (file)
@@ -190,6 +190,12 @@ static void submit_channel_request(
                                1,
                                0);
        }
+
+       REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
+
+       REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
+                               10, aux110->timeout_period/10);
+
        /* set the delay and the number of bytes to write */
 
        /* The length include
@@ -242,9 +248,6 @@ static void submit_channel_request(
                }
        }
 
-       REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
-       REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
-                               10, aux110->timeout_period/10);
        REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
 }
 
index d27f22c05e4b5abd0085fb7252fccba5296f6831..e28ed6a00ff4236ffaef4346528dc1ecbb179543 100644 (file)
@@ -71,11 +71,11 @@ enum {      /* This is the timeout as defined in DP 1.2a,
         * at most within ~240usec. That means,
         * increasing this timeout will not affect normal operation,
         * and we'll timeout after
-        * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
+        * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 2400usec.
         * This timeout is especially important for
-        * resume from S3 and CTS.
+        * converters, resume from S3, and CTS.
         */
-       SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
+       SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 6
 };
 
 struct dce_aux {
index 683829466a44c4279db97fbb72338fef9851b404..0ba68d41b9c37b91064a5defbbcf62a3160df53f 100644 (file)
@@ -1150,28 +1150,9 @@ void hubp1_cursor_set_position(
        REG_UPDATE(CURSOR_CONTROL,
                        CURSOR_ENABLE, cur_en);
 
-       //account for cases where we see negative offset relative to overlay plane
-       if (src_x_offset < 0 && src_y_offset < 0) {
-               REG_SET_2(CURSOR_POSITION, 0,
-                       CURSOR_X_POSITION, 0,
-                       CURSOR_Y_POSITION, 0);
-               x_hotspot -= src_x_offset;
-               y_hotspot -= src_y_offset;
-       } else if (src_x_offset < 0) {
-               REG_SET_2(CURSOR_POSITION, 0,
-                       CURSOR_X_POSITION, 0,
-                       CURSOR_Y_POSITION, pos->y);
-               x_hotspot -= src_x_offset;
-       } else if (src_y_offset < 0) {
-               REG_SET_2(CURSOR_POSITION, 0,
+       REG_SET_2(CURSOR_POSITION, 0,
                        CURSOR_X_POSITION, pos->x,
-                       CURSOR_Y_POSITION, 0);
-               y_hotspot -= src_y_offset;
-       } else {
-               REG_SET_2(CURSOR_POSITION, 0,
-                               CURSOR_X_POSITION, pos->x,
-                               CURSOR_Y_POSITION, pos->y);
-       }
+                       CURSOR_Y_POSITION, pos->y);
 
        REG_SET_2(CURSOR_HOT_SPOT, 0,
                        CURSOR_HOT_SPOT_X, x_hotspot,
index 9aa7bec1b5fe6f3aeb67da66d4b88b2e16966bbb..23b5b94a4939ac809c40448f1aa33e5d1500f93e 100644 (file)
@@ -91,6 +91,12 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
         *   MP0CLK DS
         */
        data->registry_data.disallowed_features = 0xE0041C00;
+       /* ECC feature should be disabled on old SMUs */
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
+       hwmgr->smu_version = smum_get_argument(hwmgr);
+       if (hwmgr->smu_version < 0x282100)
+               data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
+
        data->registry_data.od_state_in_dc_support = 0;
        data->registry_data.thermal_support = 1;
        data->registry_data.skip_baco_hardware = 0;
@@ -357,6 +363,7 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
        data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
        data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
        data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
+       data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;
 
        for (i = 0; i < GNLD_FEATURES_MAX; i++) {
                data->smu_features[i].smu_feature_bitmap =
@@ -3020,7 +3027,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
                                "FCLK_DS",
                                "MP1CLK_DS",
                                "MP0CLK_DS",
-                               "XGMI"};
+                               "XGMI",
+                               "ECC"};
        static const char *output_title[] = {
                                "FEATURES",
                                "BITMASK",
@@ -3462,6 +3470,7 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
        struct vega20_single_dpm_table *dpm_table;
        bool vblank_too_short = false;
        bool disable_mclk_switching;
+       bool disable_fclk_switching;
        uint32_t i, latency;
 
        disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
@@ -3537,13 +3546,20 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
        if (hwmgr->display_config->nb_pstate_switch_disable)
                dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
 
+       if ((disable_mclk_switching &&
+           (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) ||
+            hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value)
+               disable_fclk_switching = true;
+       else
+               disable_fclk_switching = false;
+
        /* fclk */
        dpm_table = &(data->dpm_table.fclk_table);
        dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
        dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
        dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
        dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
-       if (hwmgr->display_config->nb_pstate_switch_disable)
+       if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching)
                dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
 
        /* vclk */
index a5bc758ae09728327bd1230dbbf9969460eba263..ac2a3118a0ae779224be91fd750dda2319027a64 100644 (file)
@@ -80,6 +80,7 @@ enum {
        GNLD_DS_MP1CLK,
        GNLD_DS_MP0CLK,
        GNLD_XGMI,
+       GNLD_ECC,
 
        GNLD_FEATURES_MAX
 };
index 63d5cf69154967b90aa696de2ae5c1d407bd579f..195c4ae67058554d1d2bf194cedf9762f79a96e3 100644 (file)
@@ -99,7 +99,7 @@
 #define FEATURE_DS_MP1CLK_BIT           30
 #define FEATURE_DS_MP0CLK_BIT           31
 #define FEATURE_XGMI_BIT                32
-#define FEATURE_SPARE_33_BIT            33
+#define FEATURE_ECC_BIT                 33
 #define FEATURE_SPARE_34_BIT            34
 #define FEATURE_SPARE_35_BIT            35
 #define FEATURE_SPARE_36_BIT            36
 #define FEATURE_DS_FCLK_MASK            (1 << FEATURE_DS_FCLK_BIT            )
 #define FEATURE_DS_MP1CLK_MASK          (1 << FEATURE_DS_MP1CLK_BIT          )
 #define FEATURE_DS_MP0CLK_MASK          (1 << FEATURE_DS_MP0CLK_BIT          )
-#define FEATURE_XGMI_MASK               (1 << FEATURE_XGMI_BIT               )
+#define FEATURE_XGMI_MASK               (1ULL << FEATURE_XGMI_BIT               )
+#define FEATURE_ECC_MASK                (1ULL << FEATURE_ECC_BIT                )
 
 #define DPM_OVERRIDE_DISABLE_SOCCLK_PID             0x00000001
 #define DPM_OVERRIDE_DISABLE_UCLK_PID               0x00000002
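FEATURE_XGMI_BIT (32) and the new FEATURE_ECC_BIT (33) sit at or above bit 32, so building their masks from a plain int constant is undefined behaviour; both masks need the 1ULL base. A minimal illustration:

unsigned long long bad  = 1 << 33;      /* undefined: 1 is a 32-bit int */
unsigned long long good = 1ULL << 33;   /* well-defined 64-bit mask */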
index a63e5f0dae56ad3de5a372ecd167a350d5b89457..ab7968c8f6a29937177c0a464f6da4db9e297631 100644 (file)
@@ -1037,6 +1037,35 @@ void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
 }
 EXPORT_SYMBOL_GPL(dw_hdmi_phy_i2c_write);
 
+/* Filter out invalid setups to avoid configuring SCDC and scrambling */
+static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi)
+{
+       struct drm_display_info *display = &hdmi->connector.display_info;
+
+       /* Completely disable SCDC support for older controllers */
+       if (hdmi->version < 0x200a)
+               return false;
+
+       /* Disable if no DDC bus */
+       if (!hdmi->ddc)
+               return false;
+
+       /* Disable if SCDC is not supported, or if an HF-VSDB block is absent */
+       if (!display->hdmi.scdc.supported ||
+           !display->hdmi.scdc.scrambling.supported)
+               return false;
+
+       /*
+        * Disable if the display only supports low TMDS rates and
+        * scrambling for low rates is not supported either
+        */
+       if (!display->hdmi.scdc.scrambling.low_rates &&
+           display->max_tmds_clock <= 340000)
+               return false;
+
+       return true;
+}
+
 /*
  * HDMI2.0 Specifies the following procedure for High TMDS Bit Rates:
  * - The Source shall suspend transmission of the TMDS clock and data
@@ -1055,7 +1084,7 @@ void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi)
        unsigned long mtmdsclock = hdmi->hdmi_data.video_mode.mtmdsclock;
 
        /* Control for TMDS Bit Period/TMDS Clock-Period Ratio */
-       if (hdmi->connector.display_info.hdmi.scdc.supported) {
+       if (dw_hdmi_support_scdc(hdmi)) {
                if (mtmdsclock > HDMI14_MAX_TMDSCLK)
                        drm_scdc_set_high_tmds_clock_ratio(hdmi->ddc, 1);
                else
@@ -1579,8 +1608,9 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 
        /* Set up HDMI_FC_INVIDCONF */
        inv_val = (hdmi->hdmi_data.hdcp_enable ||
-                  vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
-                  hdmi_info->scdc.scrambling.low_rates ?
+                  (dw_hdmi_support_scdc(hdmi) &&
+                   (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
+                    hdmi_info->scdc.scrambling.low_rates)) ?
                HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE :
                HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE);
 
@@ -1646,7 +1676,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
        }
 
        /* Scrambling Control */
-       if (hdmi_info->scdc.supported) {
+       if (dw_hdmi_support_scdc(hdmi)) {
                if (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK ||
                    hdmi_info->scdc.scrambling.low_rates) {
                        /*
@@ -1658,13 +1688,13 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
                         * Source Devices compliant shall set the
                         * Source Version = 1.
                         */
-                       drm_scdc_readb(&hdmi->i2c->adap, SCDC_SINK_VERSION,
+                       drm_scdc_readb(hdmi->ddc, SCDC_SINK_VERSION,
                                       &bytes);
-                       drm_scdc_writeb(&hdmi->i2c->adap, SCDC_SOURCE_VERSION,
+                       drm_scdc_writeb(hdmi->ddc, SCDC_SOURCE_VERSION,
                                min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION));
 
                        /* Enabled Scrambling in the Sink */
-                       drm_scdc_set_scrambling(&hdmi->i2c->adap, 1);
+                       drm_scdc_set_scrambling(hdmi->ddc, 1);
 
                        /*
                         * To activate the scrambler feature, you must ensure
@@ -1680,7 +1710,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
                        hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL);
                        hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ,
                                    HDMI_MC_SWRSTZ);
-                       drm_scdc_set_scrambling(&hdmi->i2c->adap, 0);
+                       drm_scdc_set_scrambling(hdmi->ddc, 0);
                }
        }
 
@@ -1774,6 +1804,8 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
         * iteration for others.
         * The Amlogic Meson GX SoCs (v2.01a) have been identified as needing
         * the workaround with a single iteration.
+        * The Rockchip RK3288 SoC (v2.00a) and RK3328/RK3399 SoCs (v2.11a) have
+        * been identified as needing the workaround with a single iteration.
         */
 
        switch (hdmi->version) {
@@ -1782,7 +1814,9 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
                break;
        case 0x131a:
        case 0x132a:
+       case 0x200a:
        case 0x201a:
+       case 0x211a:
        case 0x212a:
                count = 1;
                break;
index 40ac1984803459b7a0e8f67e09f81b61820035ef..fbb76332cc9f149c0cc037a6d35a5ed9c63d1baa 100644 (file)
@@ -1034,7 +1034,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
                        funcs->atomic_disable(crtc, old_crtc_state);
                else if (funcs->disable)
                        funcs->disable(crtc);
-               else
+               else if (funcs->dpms)
                        funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 
                if (!(dev->irq_enabled && dev->num_crtcs))
@@ -1277,10 +1277,9 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
                if (new_crtc_state->enable) {
                        DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
                                         crtc->base.id, crtc->name);
-
                        if (funcs->atomic_enable)
                                funcs->atomic_enable(crtc, old_crtc_state);
-                       else
+                       else if (funcs->commit)
                                funcs->commit(crtc);
                }
        }
index 381581b01d485e581df8bcebcd6983b01bc8a488..05bbc2b622fc1094a2a8f85ce060d0805eae0f7e 100644 (file)
@@ -376,11 +376,7 @@ void drm_dev_unplug(struct drm_device *dev)
        synchronize_srcu(&drm_unplug_srcu);
 
        drm_dev_unregister(dev);
-
-       mutex_lock(&drm_global_mutex);
-       if (dev->open_count == 0)
-               drm_dev_put(dev);
-       mutex_unlock(&drm_global_mutex);
+       drm_dev_put(dev);
 }
 EXPORT_SYMBOL(drm_dev_unplug);
 
index 0e9349ff2d16a64dd6628ab47de8f9ab0271d632..af2ab640cadbb05105325a0de2b31ae5f5c70ccf 100644 (file)
@@ -1963,7 +1963,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
                                best_depth = fmt->depth;
                }
        }
-       if (sizes.surface_depth != best_depth) {
+       if (sizes.surface_depth != best_depth && best_depth) {
                DRM_INFO("requested bpp %d, scaled depth down to %d",
                         sizes.surface_bpp, best_depth);
                sizes.surface_depth = best_depth;
index 83a5bbca6e7e089f10d75ea723ac982b7df61356..7caa3c7ed9789901e4aa5df2c2204326cfe39c27 100644 (file)
@@ -489,11 +489,9 @@ int drm_release(struct inode *inode, struct file *filp)
 
        drm_close_helper(filp);
 
-       if (!--dev->open_count) {
+       if (!--dev->open_count)
                drm_lastclose(dev);
-               if (drm_dev_is_unplugged(dev))
-                       drm_put_dev(dev);
-       }
+
        mutex_unlock(&drm_global_mutex);
 
        drm_minor_release(minor);
index 2b4f373736c7ec537a688371f206937f87caa0ad..8b4cd31ce7bdf2a142f8b5c13efe649a157e4b0c 100644 (file)
 static noinline void save_stack(struct drm_mm_node *node)
 {
        unsigned long entries[STACKDEPTH];
-       struct stack_trace trace = {
-               .entries = entries,
-               .max_entries = STACKDEPTH,
-               .skip = 1
-       };
+       unsigned int n;
 
-       save_stack_trace(&trace);
-       if (trace.nr_entries != 0 &&
-           trace.entries[trace.nr_entries-1] == ULONG_MAX)
-               trace.nr_entries--;
+       n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
 
        /* May be called under spinlock, so avoid sleeping */
-       node->stack = depot_save_stack(&trace, GFP_NOWAIT);
+       node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
 }
 
 static void show_leaks(struct drm_mm *mm)
 {
        struct drm_mm_node *node;
-       unsigned long entries[STACKDEPTH];
+       unsigned long *entries;
+       unsigned int nr_entries;
        char *buf;
 
        buf = kmalloc(BUFSZ, GFP_KERNEL);
@@ -132,19 +126,14 @@ static void show_leaks(struct drm_mm *mm)
                return;
 
        list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
-               struct stack_trace trace = {
-                       .entries = entries,
-                       .max_entries = STACKDEPTH
-               };
-
                if (!node->stack) {
                        DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
                                  node->start, node->size);
                        continue;
                }
 
-               depot_fetch_stack(node->stack, &trace);
-               snprint_stack_trace(buf, BUFSZ, &trace, 0);
+               nr_entries = stack_depot_fetch(node->stack, &entries);
+               stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
                DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
                          node->start, node->size, buf);
        }
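This migrates drm_mm's leak tracking from the removed struct stack_trace interface to the stackdepot pair: stack_trace_save() captures raw entries, stack_depot_save() interns them behind a compact handle, and stack_depot_fetch() gets them back for printing. A condensed sketch of the round trip under those 5.2-era signatures:

unsigned long entries[STACKDEPTH];
unsigned int n = stack_trace_save(entries, ARRAY_SIZE(entries), 1 /* skip caller */);
depot_stack_handle_t handle = stack_depot_save(entries, n, GFP_NOWAIT);

/* later, e.g. when reporting a leak */
unsigned long *fetched;
unsigned int nr = stack_depot_fetch(handle, &fetched);
stack_trace_print(fetched, nr, 0);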
index 0573eab0e190f6d76d0e970a9b0b37c67c6dfe3a..f35e4ab55b270132871aea56af219483349e43d9 100644 (file)
@@ -20,6 +20,7 @@
 #include "regs-vp.h"
 
 #include <linux/kernel.h>
+#include <linux/ktime.h>
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/i2c.h>
@@ -352,15 +353,62 @@ static void mixer_cfg_vp_blend(struct mixer_context *ctx, unsigned int alpha)
        mixer_reg_write(ctx, MXR_VIDEO_CFG, val);
 }
 
-static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
+static bool mixer_is_synced(struct mixer_context *ctx)
 {
-       /* block update on vsync */
-       mixer_reg_writemask(ctx, MXR_STATUS, enable ?
-                       MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
+       u32 base, shadow;
 
+       if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
+           ctx->mxr_ver == MXR_VER_128_0_0_184)
+               return !(mixer_reg_read(ctx, MXR_CFG) &
+                        MXR_CFG_LAYER_UPDATE_COUNT_MASK);
+
+       if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
+           vp_reg_read(ctx, VP_SHADOW_UPDATE))
+               return false;
+
+       base = mixer_reg_read(ctx, MXR_CFG);
+       shadow = mixer_reg_read(ctx, MXR_CFG_S);
+       if (base != shadow)
+               return false;
+
+       base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
+       shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
+       if (base != shadow)
+               return false;
+
+       base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
+       shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
+       if (base != shadow)
+               return false;
+
+       return true;
+}
+
+static int mixer_wait_for_sync(struct mixer_context *ctx)
+{
+       ktime_t timeout = ktime_add_us(ktime_get(), 100000);
+
+       while (!mixer_is_synced(ctx)) {
+               usleep_range(1000, 2000);
+               if (ktime_compare(ktime_get(), timeout) > 0)
+                       return -ETIMEDOUT;
+       }
+       return 0;
+}
+
+static void mixer_disable_sync(struct mixer_context *ctx)
+{
+       mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_SYNC_ENABLE);
+}
+
+static void mixer_enable_sync(struct mixer_context *ctx)
+{
+       if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
+           ctx->mxr_ver == MXR_VER_128_0_0_184)
+               mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
+       mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SYNC_ENABLE);
        if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
-               vp_reg_write(ctx, VP_SHADOW_UPDATE, enable ?
-                       VP_SHADOW_UPDATE_ENABLE : 0);
+               vp_reg_write(ctx, VP_SHADOW_UPDATE, VP_SHADOW_UPDATE_ENABLE);
 }
 
 static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height)
@@ -498,7 +546,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
 
        spin_lock_irqsave(&ctx->reg_slock, flags);
 
-       vp_reg_write(ctx, VP_SHADOW_UPDATE, 1);
        /* interlace or progressive scan mode */
        val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
        vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP);
@@ -553,11 +600,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
        vp_regs_dump(ctx);
 }
 
-static void mixer_layer_update(struct mixer_context *ctx)
-{
-       mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
-}
-
 static void mixer_graph_buffer(struct mixer_context *ctx,
                               struct exynos_drm_plane *plane)
 {
@@ -640,11 +682,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
        mixer_cfg_layer(ctx, win, priority, true);
        mixer_cfg_gfx_blend(ctx, win, pixel_alpha, state->base.alpha);
 
-       /* layer update mandatory for mixer 16.0.33.0 */
-       if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
-               ctx->mxr_ver == MXR_VER_128_0_0_184)
-               mixer_layer_update(ctx);
-
        spin_unlock_irqrestore(&ctx->reg_slock, flags);
 
        mixer_regs_dump(ctx);
@@ -709,7 +746,7 @@ static void mixer_win_reset(struct mixer_context *ctx)
 static irqreturn_t mixer_irq_handler(int irq, void *arg)
 {
        struct mixer_context *ctx = arg;
-       u32 val, base, shadow;
+       u32 val;
 
        spin_lock(&ctx->reg_slock);
 
@@ -723,26 +760,9 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
                val &= ~MXR_INT_STATUS_VSYNC;
 
                /* interlace scan need to check shadow register */
-               if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
-                       if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
-                           vp_reg_read(ctx, VP_SHADOW_UPDATE))
-                               goto out;
-
-                       base = mixer_reg_read(ctx, MXR_CFG);
-                       shadow = mixer_reg_read(ctx, MXR_CFG_S);
-                       if (base != shadow)
-                               goto out;
-
-                       base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
-                       shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
-                       if (base != shadow)
-                               goto out;
-
-                       base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
-                       shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
-                       if (base != shadow)
-                               goto out;
-               }
+               if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)
+                   && !mixer_is_synced(ctx))
+                       goto out;
 
                drm_crtc_handle_vblank(&ctx->crtc->base);
        }
@@ -917,12 +937,14 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
 
 static void mixer_atomic_begin(struct exynos_drm_crtc *crtc)
 {
-       struct mixer_context *mixer_ctx = crtc->ctx;
+       struct mixer_context *ctx = crtc->ctx;
 
-       if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
+       if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
                return;
 
-       mixer_vsync_set_update(mixer_ctx, false);
+       if (mixer_wait_for_sync(ctx))
+               dev_err(ctx->dev, "timeout waiting for VSYNC\n");
+       mixer_disable_sync(ctx);
 }
 
 static void mixer_update_plane(struct exynos_drm_crtc *crtc,
@@ -964,7 +986,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
        if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
                return;
 
-       mixer_vsync_set_update(mixer_ctx, true);
+       mixer_enable_sync(mixer_ctx);
        exynos_crtc_handle_event(crtc);
 }
 
@@ -979,7 +1001,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
 
        exynos_drm_pipe_clk_enable(crtc, true);
 
-       mixer_vsync_set_update(ctx, false);
+       mixer_disable_sync(ctx);
 
        mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
 
@@ -992,7 +1014,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
 
        mixer_commit(ctx);
 
-       mixer_vsync_set_update(ctx, true);
+       mixer_enable_sync(ctx);
 
        set_bit(MXR_BIT_POWERED, &ctx->flags);
 }
index 35b4ec3f7618b887e5661d0d652cca99b6ed02c6..3592d04c33b283cac0abd2f432ce313194d2b606 100644 (file)
@@ -1441,7 +1441,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
        }
 
        if (index_mode) {
-               if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
+               if (guest_gma >= I915_GTT_PAGE_SIZE) {
                        ret = -EFAULT;
                        goto err;
                }
index 035479e273beca866575c4bef70438583029d2df..e3f9caa7839f7347e1eaa25a798c6574446e813f 100644 (file)
@@ -448,7 +448,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
 /**
  * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
  * @vgpu: a vGPU
- * @conncted: link state
+ * @connected: link state
  *
  * This function is used to trigger hotplug interrupt for vGPU
  *
index 3e7e2b80c8579017cecdda478bc6166e1f46e061..69a9a1b2ea4ac44ba7d8f6530f99f59a9958076f 100644 (file)
@@ -209,7 +209,7 @@ static int vgpu_get_plane_info(struct drm_device *dev,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_vgpu_primary_plane_format p;
        struct intel_vgpu_cursor_plane_format c;
-       int ret;
+       int ret, tile_height = 1;
 
        if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
                ret = intel_vgpu_decode_primary_plane(vgpu, &p);
@@ -228,19 +228,19 @@ static int vgpu_get_plane_info(struct drm_device *dev,
                        break;
                case PLANE_CTL_TILED_X:
                        info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
+                       tile_height = 8;
                        break;
                case PLANE_CTL_TILED_Y:
                        info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
+                       tile_height = 32;
                        break;
                case PLANE_CTL_TILED_YF:
                        info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
+                       tile_height = 32;
                        break;
                default:
                        gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
                }
-
-               info->size = (((p.stride * p.height * p.bpp) / 8) +
-                             (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
                ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
                if (ret)
@@ -262,14 +262,13 @@ static int vgpu_get_plane_info(struct drm_device *dev,
                        info->x_hot = UINT_MAX;
                        info->y_hot = UINT_MAX;
                }
-
-               info->size = (((info->stride * c.height * c.bpp) / 8)
-                               + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        } else {
                gvt_vgpu_err("invalid plane id:%d\n", plane_id);
                return -EINVAL;
        }
 
+       info->size = (info->stride * roundup(info->height, tile_height)
+                     + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (info->size == 0) {
                gvt_vgpu_err("fb size is zero\n");
                return -EINVAL;
index c7103dd2d8d571fde462f173dcc67efc0973cc69..9814773882ec2b875ae2db00a22768deed72c618 100644 (file)
@@ -750,14 +750,20 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
 
 static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
 {
-       struct intel_vgpu_ppgtt_spt *spt;
+       struct intel_vgpu_ppgtt_spt *spt, *spn;
        struct radix_tree_iter iter;
-       void **slot;
+       LIST_HEAD(all_spt);
+       void __rcu **slot;
 
+       rcu_read_lock();
        radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
                spt = radix_tree_deref_slot(slot);
-               ppgtt_free_spt(spt);
+               list_move(&spt->post_shadow_list, &all_spt);
        }
+       rcu_read_unlock();
+
+       list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
+               ppgtt_free_spt(spt);
 }
 
 static int ppgtt_handle_guest_write_page_table_bytes(
@@ -1882,7 +1888,11 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
        }
 
        list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
+
+       mutex_lock(&gvt->gtt.ppgtt_mm_lock);
        list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
+       mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
+
        return mm;
 }
 
@@ -1942,7 +1952,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
  */
 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
 {
-       atomic_dec(&mm->pincount);
+       atomic_dec_if_positive(&mm->pincount);
 }
 
 /**
@@ -1967,9 +1977,10 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
                if (ret)
                        return ret;
 
+               mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
                list_move_tail(&mm->ppgtt_mm.lru_list,
                               &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
-
+               mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
        }
 
        return 0;
@@ -1980,6 +1991,8 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
        struct intel_vgpu_mm *mm;
        struct list_head *pos, *n;
 
+       mutex_lock(&gvt->gtt.ppgtt_mm_lock);
+
        list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
                mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
 
@@ -1987,9 +2000,11 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
                        continue;
 
                list_del_init(&mm->ppgtt_mm.lru_list);
+               mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
                invalidate_ppgtt_mm(mm);
                return 1;
        }
+       mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
        return 0;
 }
 
@@ -2659,6 +2674,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
                }
        }
        INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
+       mutex_init(&gvt->gtt.ppgtt_mm_lock);
        return 0;
 }
 
@@ -2699,7 +2715,9 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
        list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
                mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
                if (mm->type == INTEL_GVT_MM_PPGTT) {
+                       mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
                        list_del_init(&mm->ppgtt_mm.lru_list);
+                       mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
                        if (mm->ppgtt_mm.shadowed)
                                invalidate_ppgtt_mm(mm);
                }
index d8cb04cc946dff3e19466ff387089db96c226d53..edb610dc5d8689e49f22310b310133b9cb3ee921 100644 (file)
@@ -88,6 +88,7 @@ struct intel_gvt_gtt {
        void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
        struct list_head oos_page_use_list_head;
        struct list_head oos_page_free_list_head;
+       struct mutex ppgtt_mm_lock;
        struct list_head ppgtt_mm_lru_list_head;
 
        struct page *scratch_page;
index d5fcc447d22f0d0663a4664c5767d72fa199cb0d..a68addf95c230f2edcc9b5b21860e9aee406bc27 100644 (file)
@@ -905,7 +905,7 @@ static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
 static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
                void *buf, unsigned long count, bool is_write)
 {
-       void *aperture_va;
+       void __iomem *aperture_va;
 
        if (!intel_vgpu_in_aperture(vgpu, off) ||
            !intel_vgpu_in_aperture(vgpu, off + count)) {
@@ -920,9 +920,9 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
                return -EIO;
 
        if (is_write)
-               memcpy(aperture_va + offset_in_page(off), buf, count);
+               memcpy_toio(aperture_va + offset_in_page(off), buf, count);
        else
-               memcpy(buf, aperture_va + offset_in_page(off), count);
+               memcpy_fromio(buf, aperture_va + offset_in_page(off), count);
 
        io_mapping_unmap(aperture_va);
 
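The io variants follow from the __iomem annotation added above: mappings returned by io_mapping_map_wc() or ioremap() must not be touched with plain memcpy(), which sparse flags and which can misbehave on some architectures. A generic fragment of the idiom (phys_addr, buf, and len are illustrative):

void __iomem *regs = ioremap(phys_addr, len);
if (!regs)
        return -ENOMEM;

memcpy_fromio(buf, regs, len);  /* device -> kernel buffer */
memcpy_toio(regs, buf, len);    /* kernel buffer -> device */
iounmap(regs);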
index 7d84cfb9051ac886579648ac7bb2cc5e2a70b3fa..7902fb162d09441f9b4f65447f5e6619b8792c01 100644 (file)
@@ -132,6 +132,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 
        {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
        {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+       {RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
 
        {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
        {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
index 1bb8f936fdaa75f2ee738bdf3235a247fac90fe8..05b953793316b28ac1fb19c902474e468ba828b0 100644 (file)
@@ -346,7 +346,7 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
        int i = 0;
 
        if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
-               return -1;
+               return -EINVAL;
 
        if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
                px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
@@ -410,12 +410,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        if (workload->shadow)
                return 0;
 
-       ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
-       if (ret < 0) {
-               gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
-               return ret;
-       }
-
        /* pin shadow context by gvt even the shadow context will be pinned
         * when i915 alloc request. That is because gvt will update the guest
         * context from shadow context when workload is completed, and at that
@@ -678,6 +672,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+       struct intel_vgpu_submission *s = &vgpu->submission;
+       struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+       struct i915_request *rq;
        int ring_id = workload->ring_id;
        int ret;
 
@@ -687,6 +684,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
        mutex_lock(&vgpu->vgpu_lock);
        mutex_lock(&dev_priv->drm.struct_mutex);
 
+       ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+       if (ret < 0) {
+               gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+               goto err_req;
+       }
+
        ret = intel_gvt_workload_req_alloc(workload);
        if (ret)
                goto err_req;
@@ -703,6 +706,14 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 
        ret = prepare_workload(workload);
 out:
+       if (ret) {
+               /* We might still need to add a request with a
+                * clean ctx to retire it properly.
+                */
+               rq = fetch_and_zero(&workload->req);
+               i915_request_put(rq);
+       }
+
        if (!IS_ERR_OR_NULL(workload->req)) {
                gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                                ring_id, workload->req);
@@ -739,7 +750,8 @@ static struct intel_vgpu_workload *pick_next_workload(
                goto out;
        }
 
-       if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
+       if (!scheduler->current_vgpu->active ||
+           list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
                goto out;
 
        /*
@@ -1474,8 +1486,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
                intel_runtime_pm_put_unchecked(dev_priv);
        }
 
-       if (ret && (vgpu_is_vm_unhealthy(ret))) {
-               enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+       if (ret) {
+               if (vgpu_is_vm_unhealthy(ret))
+                       enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
                intel_vgpu_destroy_workload(workload);
                return ERR_PTR(ret);
        }
index 0bd890c04fe4f7c911bd9bde1a79af11af08ff5c..f6f6e5b78e9784c0ffee5f7132a8ddd2a9339954 100644 (file)
@@ -4830,7 +4830,10 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
                ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
                                       &ctx);
                if (ret) {
-                       ret = -EINTR;
+                       if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
+                               try_again = true;
+                               continue;
+                       }
                        break;
                }
                crtc = connector->state->crtc;
index 9adc7bb9e69ccfec96e468f95435b83e084ffcce..a67a63b5aa84a09d675793dc118fce8829315917 100644 (file)
@@ -2346,7 +2346,8 @@ static inline unsigned int i915_sg_segment_size(void)
                                 INTEL_DEVID(dev_priv) == 0x5915 || \
                                 INTEL_DEVID(dev_priv) == 0x591E)
 #define IS_AML_ULX(dev_priv)   (INTEL_DEVID(dev_priv) == 0x591C || \
-                                INTEL_DEVID(dev_priv) == 0x87C0)
+                                INTEL_DEVID(dev_priv) == 0x87C0 || \
+                                INTEL_DEVID(dev_priv) == 0x87CA)
 #define IS_SKL_GT2(dev_priv)   (IS_SKYLAKE(dev_priv) && \
                                 INTEL_INFO(dev_priv)->gt == 2)
 #define IS_SKL_GT3(dev_priv)   (IS_SKYLAKE(dev_priv) && \
index 30d516e975c64697b2b45019ba747c6251b7ecf1..8558e81fdc2af85dd52486b7c8d55580fe997373 100644 (file)
@@ -1734,8 +1734,13 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
         * pages from.
         */
        if (!obj->base.filp) {
-               i915_gem_object_put(obj);
-               return -ENXIO;
+               addr = -ENXIO;
+               goto err;
+       }
+
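+       /* Reject offset/size ranges that would run past the end of the object */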
+       if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
+               addr = -EINVAL;
+               goto err;
        }
 
        addr = vm_mmap(obj->base.filp, 0, args->size,
@@ -1749,8 +1754,8 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                struct vm_area_struct *vma;
 
                if (down_write_killable(&mm->mmap_sem)) {
-                       i915_gem_object_put(obj);
-                       return -EINTR;
+                       addr = -EINTR;
+                       goto err;
                }
                vma = find_vma(mm, addr);
                if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
@@ -1768,12 +1773,10 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
        i915_gem_object_put(obj);
 
        args->addr_ptr = (u64)addr;
-
        return 0;
 
 err:
        i915_gem_object_put(obj);
-
        return addr;
 }
 
index 02adcaf6ebea69086aa07be57fed55b347636c94..16f80a4488206a30522bd77f1841a5f80f0d4da0 100644 (file)
@@ -1667,6 +1667,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
                                             len)) {
 end_user:
                                user_access_end();
+end:
                                kvfree(relocs);
                                err = -EFAULT;
                                goto err;
@@ -1686,7 +1687,7 @@ end_user:
                 * relocations were valid.
                 */
                if (!user_access_begin(urelocs, size))
-                       goto end_user;
+                       goto end;
 
                for (copied = 0; copied < nreloc; copied++)
                        unsafe_put_user(-1,
@@ -2695,7 +2696,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
                 * when we did the "copy_from_user()" above.
                 */
                if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
-                       goto end_user;
+                       goto end;
 
                for (i = 0; i < args->buffer_count; i++) {
                        if (!(exec2_list[i].offset & UPDATE))
@@ -2709,6 +2710,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
                }
 end_user:
                user_access_end();
+end:;
        }
 
        args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
index 9a65341fec097e500ace05a410b62f6f19390d21..aa6791255252f1800b2609285fb399653625b000 100644 (file)
@@ -1721,7 +1721,7 @@ error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
                        i915_error_generate_code(error, engines));
        if (engines) {
                /* Just show the first executing process, more is confusing */
-               i = ffs(engines);
+               i = __ffs(engines);
                len += scnprintf(error->error_msg + len,
                                 sizeof(error->error_msg) - len,
                                 ", in %s [%d]",
index 638a586469f97be9fb83bbbcb152c518e7d46e1e..047855dd8c6b828ce42f926680f7d8466883d3cc 100644 (file)
@@ -2863,7 +2863,7 @@ enum i915_power_well_id {
 #define GEN11_GT_VEBOX_VDBOX_DISABLE   _MMIO(0x9140)
 #define   GEN11_GT_VDBOX_DISABLE_MASK  0xff
 #define   GEN11_GT_VEBOX_DISABLE_SHIFT 16
-#define   GEN11_GT_VEBOX_DISABLE_MASK  (0xff << GEN11_GT_VEBOX_DISABLE_SHIFT)
+#define   GEN11_GT_VEBOX_DISABLE_MASK  (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
 
 #define GEN11_EU_DISABLE _MMIO(0x9134)
 #define GEN11_EU_DIS_MASK 0xFF
@@ -9243,7 +9243,7 @@ enum skl_power_gate {
 #define TRANS_DDI_FUNC_CTL2(tran)      _MMIO_TRANS2(tran, \
                                                     _TRANS_DDI_FUNC_CTL2_A)
 #define  PORT_SYNC_MODE_ENABLE                 (1 << 4)
-#define  PORT_SYNC_MODE_MASTER_SELECT(x)       ((x) < 0)
+#define  PORT_SYNC_MODE_MASTER_SELECT(x)       ((x) << 0)
 #define  PORT_SYNC_MODE_MASTER_SELECT_MASK     (0x7 << 0)
 #define  PORT_SYNC_MODE_MASTER_SELECT_SHIFT    0
 
index b713bed20c3880c088a45c49e8a787c786f6e971..41b5bcb803cb511e77a01ca239654b5a2d828dfc 100644 (file)
 
 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
 {
-       unsigned long entries[12];
-       struct stack_trace trace = {
-               .entries = entries,
-               .max_entries = ARRAY_SIZE(entries),
-       };
+       unsigned long *entries;
+       unsigned int nr_entries;
        char buf[512];
 
        if (!vma->node.stack) {
@@ -49,8 +46,8 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
                return;
        }
 
-       depot_fetch_stack(vma->node.stack, &trace);
-       snprint_stack_trace(buf, sizeof(buf), &trace, 0);
+       nr_entries = stack_depot_fetch(vma->node.stack, &entries);
+       stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
        DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
                         vma->node.start, vma->node.size, reason, buf);
 }
index 73a7bee24a663faa672ade21cfe7ea7cb1bc4b46..641e0778fa9c4123204f75091df3c53b5162a961 100644 (file)
@@ -323,6 +323,21 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
        }
 }
 
+static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
+                                    struct intel_dsi *intel_dsi)
+{
+       enum port port;
+
+       for_each_dsi_port(port, intel_dsi->ports) {
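+               /* Port A uses the DDI A IO power domain; any other DSI port uses DDI B */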
+               WARN_ON(intel_dsi->io_wakeref[port]);
+               intel_dsi->io_wakeref[port] =
+                       intel_display_power_get(dev_priv,
+                                               port == PORT_A ?
+                                               POWER_DOMAIN_PORT_DDI_A_IO :
+                                               POWER_DOMAIN_PORT_DDI_B_IO);
+       }
+}
+
 static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -336,13 +351,7 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
                I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
        }
 
-       for_each_dsi_port(port, intel_dsi->ports) {
-               intel_dsi->io_wakeref[port] =
-                       intel_display_power_get(dev_priv,
-                                               port == PORT_A ?
-                                               POWER_DOMAIN_PORT_DDI_A_IO :
-                                               POWER_DOMAIN_PORT_DDI_B_IO);
-       }
+       get_dsi_io_power_domains(dev_priv, intel_dsi);
 }
 
 static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
@@ -589,6 +598,12 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
                val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
        }
        I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
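+       /* Ungate the DDI clock for each DSI port now that the PLL has been selected */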
+       for_each_dsi_port(port, intel_dsi->ports) {
+               val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+       }
+       I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
        POSTING_READ(DPCLKA_CFGCR0_ICL);
 
        mutex_unlock(&dev_priv->dpll_lock);
@@ -1117,7 +1132,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
                        DRM_ERROR("DDI port:%c buffer not idle\n",
                                  port_name(port));
        }
-       gen11_dsi_ungate_clocks(encoder);
+       gen11_dsi_gate_clocks(encoder);
 }
 
 static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
@@ -1218,20 +1233,11 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
        return 0;
 }
 
-static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder,
-                                      struct intel_crtc_state *crtc_state)
+static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
+                                       struct intel_crtc_state *crtc_state)
 {
-       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-       u64 domains = 0;
-       enum port port;
-
-       for_each_dsi_port(port, intel_dsi->ports)
-               if (port == PORT_A)
-                       domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO);
-               else
-                       domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO);
-
-       return domains;
+       get_dsi_io_power_domains(to_i915(encoder->base.dev),
+                                enc_to_intel_dsi(&encoder->base));
 }
 
 static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
index b508d8a735e0347637274aebb2a5eaed29dda2fd..4364f42cac6b88cfd8eef1f82783a863e483884d 100644 (file)
@@ -1673,6 +1673,7 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
                info->supports_dvi = (port != PORT_A && port != PORT_E);
                info->supports_hdmi = info->supports_dvi;
                info->supports_dp = (port != PORT_E);
+               info->supports_edp = (port == PORT_A);
        }
 }
 
index 14d580cdefd3e875e08b7af0be350d4f877fb7ef..98cea1f4b3bf05500dcd7fe24b2f367fa6c9e3eb 100644 (file)
@@ -2075,12 +2075,11 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
                                              intel_aux_power_domain(dig_port);
 }
 
-static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
-                                      struct intel_crtc_state *crtc_state)
+static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
+                                       struct intel_crtc_state *crtc_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_digital_port *dig_port;
-       u64 domains;
 
        /*
         * TODO: Add support for MST encoders. Atm, the following should never
@@ -2088,10 +2087,10 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
         * hook.
         */
        if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
-               return 0;
+               return;
 
        dig_port = enc_to_dig_port(&encoder->base);
-       domains = BIT_ULL(dig_port->ddi_io_power_domain);
+       intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
 
        /*
         * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
@@ -2099,15 +2098,15 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
         */
        if (intel_crtc_has_dp_encoder(crtc_state) ||
            intel_port_is_tc(dev_priv, encoder->port))
-               domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port));
+               intel_display_power_get(dev_priv,
+                                       intel_ddi_main_link_aux_domain(dig_port));
 
        /*
         * VDSC power is needed when DSC is enabled
         */
        if (crtc_state->dsc_params.compression_enable)
-               domains |= BIT_ULL(intel_dsc_power_domain(crtc_state));
-
-       return domains;
+               intel_display_power_get(dev_priv,
+                                       intel_dsc_power_domain(crtc_state));
 }
 
 void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
@@ -2825,10 +2824,10 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
                                return;
                }
                /*
-                * DSI ports should have their DDI clock ungated when disabled
-                * and gated when enabled.
+                * For DSI we keep the ddi clocks gated
+                * except during enable/disable sequence.
                 */
-               ddi_clk_needed = !encoder->base.crtc;
+               ddi_clk_needed = false;
        }
 
        val = I915_READ(DPCLKA_CFGCR0_ICL);
@@ -3863,14 +3862,16 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
                ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
        else
                ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
+       if (ret)
+               return ret;
 
-       if (IS_GEN9_LP(dev_priv) && ret)
+       if (IS_GEN9_LP(dev_priv))
                pipe_config->lane_lat_optim_mask =
                        bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
 
        intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
 
-       return ret;
+       return 0;
 
 }
 
index ccb616351bba725052ea3a752cc5263744d58af2..421aac80a83815b9c1cfa40a7142e171cfd8bcd5 100644 (file)
@@ -15986,8 +15986,6 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
        struct intel_encoder *encoder;
 
        for_each_intel_encoder(&dev_priv->drm, encoder) {
-               u64 get_domains;
-               enum intel_display_power_domain domain;
                struct intel_crtc_state *crtc_state;
 
                if (!encoder->get_power_domains)
@@ -16001,9 +15999,7 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
                        continue;
 
                crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
-               get_domains = encoder->get_power_domains(encoder, crtc_state);
-               for_each_power_domain(domain, get_domains)
-                       intel_display_power_get(dev_priv, domain);
+               encoder->get_power_domains(encoder, crtc_state);
        }
 }
 
index cf709835fb9a9eece3c0761c21c53c34a25b7e22..48da4a969a0a9afabf6be3db6aff252fd02d3c83 100644 (file)
@@ -1859,42 +1859,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
        return -EINVAL;
 }
 
-/* Optimize link config in order: max bpp, min lanes, min clock */
-static int
-intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
-                                 struct intel_crtc_state *pipe_config,
-                                 const struct link_config_limits *limits)
-{
-       struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-       int bpp, clock, lane_count;
-       int mode_rate, link_clock, link_avail;
-
-       for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
-               mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
-                                                  bpp);
-
-               for (lane_count = limits->min_lane_count;
-                    lane_count <= limits->max_lane_count;
-                    lane_count <<= 1) {
-                       for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
-                               link_clock = intel_dp->common_rates[clock];
-                               link_avail = intel_dp_max_data_rate(link_clock,
-                                                                   lane_count);
-
-                               if (mode_rate <= link_avail) {
-                                       pipe_config->lane_count = lane_count;
-                                       pipe_config->pipe_bpp = bpp;
-                                       pipe_config->port_clock = link_clock;
-
-                                       return 0;
-                               }
-                       }
-               }
-       }
-
-       return -EINVAL;
-}
-
 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
 {
        int i, num_bpc;
@@ -1922,6 +1886,9 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
        int pipe_bpp;
        int ret;
 
+       pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
+               intel_dp_supports_fec(intel_dp, pipe_config);
+
        if (!intel_dp_supports_dsc(intel_dp, pipe_config))
                return -EINVAL;
 
@@ -2031,15 +1998,13 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
        limits.min_bpp = 6 * 3;
        limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
 
-       if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
+       if (intel_dp_is_edp(intel_dp)) {
                /*
                 * Use the maximum clock and number of lanes the eDP panel
-                * advertizes being capable of. The eDP 1.3 and earlier panels
-                * are generally designed to support only a single clock and
-                * lane configuration, and typically these values correspond to
-                * the native resolution of the panel. With eDP 1.4 rate select
-                * and DSC, this is decreasingly the case, and we need to be
-                * able to select less than maximum link config.
+                * advertises being capable of. The panels are generally
+                * designed to support only a single clock and lane
+                * configuration, and typically these values correspond to the
+                * native resolution of the panel.
                 */
                limits.min_lane_count = limits.max_lane_count;
                limits.min_clock = limits.max_clock;
@@ -2053,22 +2018,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
                      intel_dp->common_rates[limits.max_clock],
                      limits.max_bpp, adjusted_mode->crtc_clock);
 
-       if (intel_dp_is_edp(intel_dp))
-               /*
-                * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
-                * section A.1: "It is recommended that the minimum number of
-                * lanes be used, using the minimum link rate allowed for that
-                * lane configuration."
-                *
-                * Note that we use the max clock and lane count for eDP 1.3 and
-                * earlier, and fast vs. wide is irrelevant.
-                */
-               ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
-                                                       &limits);
-       else
-               /* Optimize for slow and wide. */
-               ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
-                                                       &limits);
+       /*
+        * Optimize for slow and wide. This is the place to add an alternative
+        * optimization policy.
+        */
+       ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
 
        /* enable compression if the mode doesn't fit available BW */
        DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
@@ -2165,9 +2119,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
                return -EINVAL;
 
-       pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
-                                 intel_dp_supports_fec(intel_dp, pipe_config);
-
        ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
        if (ret < 0)
                return ret;
index 15db41394b9ed75d9de8545c8a0faff9efbc9a9b..d5660ac1b0d60999d8788710a0140ec2f8d38570 100644 (file)
@@ -270,10 +270,12 @@ struct intel_encoder {
         * be set correctly before calling this function. */
        void (*get_config)(struct intel_encoder *,
                           struct intel_crtc_state *pipe_config);
-       /* Returns a mask of power domains that need to be referenced as part
-        * of the hardware state readout code. */
-       u64 (*get_power_domains)(struct intel_encoder *encoder,
-                                struct intel_crtc_state *crtc_state);
+       /*
+        * Acquires the power domains needed for an active encoder during
+        * hardware state readout.
+        */
+       void (*get_power_domains)(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *crtc_state);
        /*
         * Called during system suspend after all pending requests for the
         * encoder are flushed (for example for DP AUX transactions) and
index e8f694b57b8ac857528824051ddcc42016e86239..376ffe842e2678d1f31ee68acd38908cff8a85d9 100644 (file)
@@ -338,8 +338,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                                    bool *enabled, int width, int height)
 {
        struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
+       unsigned long conn_configured, conn_seq, mask;
        unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
-       unsigned long conn_configured, conn_seq;
        int i, j;
        bool *save_enabled;
        bool fallback = true, ret = true;
@@ -357,9 +357,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                drm_modeset_backoff(&ctx);
 
        memcpy(save_enabled, enabled, count);
-       conn_seq = GENMASK(count - 1, 0);
+       mask = GENMASK(count - 1, 0);
        conn_configured = 0;
 retry:
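+       /* Snapshot progress so we only loop again while new connectors get configured */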
+       conn_seq = conn_configured;
        for (i = 0; i < count; i++) {
                struct drm_fb_helper_connector *fb_conn;
                struct drm_connector *connector;
@@ -372,8 +373,7 @@ retry:
                if (conn_configured & BIT(i))
                        continue;
 
-               /* First pass, only consider tiled connectors */
-               if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
+               if (conn_seq == 0 && !connector->has_tile)
                        continue;
 
                if (connector->status == connector_status_connected)
@@ -477,10 +477,8 @@ retry:
                conn_configured |= BIT(i);
        }
 
-       if (conn_configured != conn_seq) { /* repeat until no more are found */
-               conn_seq = conn_configured;
+       if ((conn_configured & mask) != mask && conn_configured != conn_seq)
                goto retry;
-       }
 
        /*
         * If the BIOS didn't enable everything it could, fall back to have the
index a017a4232c0fae4580da8b0a59bf54a9ba7fdecb..20c4434474e3a504060370c3f7d0d701969fff7f 100644 (file)
 static noinline depot_stack_handle_t __save_depot_stack(void)
 {
        unsigned long entries[STACKDEPTH];
-       struct stack_trace trace = {
-               .entries = entries,
-               .max_entries = ARRAY_SIZE(entries),
-               .skip = 1,
-       };
+       unsigned int n;
 
-       save_stack_trace(&trace);
-       if (trace.nr_entries &&
-           trace.entries[trace.nr_entries - 1] == ULONG_MAX)
-               trace.nr_entries--;
-
-       return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
+       n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
+       return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
 }
 
 static void __print_depot_stack(depot_stack_handle_t stack,
                                char *buf, int sz, int indent)
 {
-       unsigned long entries[STACKDEPTH];
-       struct stack_trace trace = {
-               .entries = entries,
-               .max_entries = ARRAY_SIZE(entries),
-       };
+       unsigned long *entries;
+       unsigned int nr_entries;
 
-       depot_fetch_stack(stack, &trace);
-       snprint_stack_trace(buf, sz, &trace, indent);
+       nr_entries = stack_depot_fetch(stack, &entries);
+       stack_trace_snprint(buf, sz, entries, nr_entries, indent);
 }
 
 static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
index 32dce7176f6381dc2a0429691dccc2eafc7fe360..b9b0ea4e2404d6cfce2c37be5d331591fb88fe6e 100644 (file)
@@ -455,7 +455,7 @@ static int igt_evict_contexts(void *arg)
                        struct i915_gem_context *ctx;
 
                        ctx = live_context(i915, file);
-                       if (!ctx)
+                       if (IS_ERR(ctx))
                                break;
 
                        /* We will need some GGTT space for the rq's context */
index 6403728fe7784f54977b0c318d790ea886553a04..31c93c3ccd00ffa62c3158d159d7cc4afd8f9ae5 100644 (file)
@@ -256,6 +256,28 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
        mutex_unlock(&dev_priv->sb_lock);
 }
 
+static int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       u32 tmp;
+
+       tmp = I915_READ(PIPEMISC(crtc->pipe));
+
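+       /* The dither BPC field gives bits per channel; pipe bpp covers three channels (6 -> 18, 8 -> 24, 10 -> 30, 12 -> 36) */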
+       switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
+       case PIPEMISC_DITHER_6_BPC:
+               return 18;
+       case PIPEMISC_DITHER_8_BPC:
+               return 24;
+       case PIPEMISC_DITHER_10_BPC:
+               return 30;
+       case PIPEMISC_DITHER_12_BPC:
+               return 36;
+       default:
+               MISSING_CASE(tmp);
+               return 0;
+       }
+}
+
 static int intel_dsi_compute_config(struct intel_encoder *encoder,
                                    struct intel_crtc_state *pipe_config,
                                    struct drm_connector_state *conn_state)
@@ -1071,6 +1093,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
        bpp = mipi_dsi_pixel_format_to_bpp(
                        pixel_format_from_register_bits(fmt));
 
+       pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
+
        /* Enable frame timestamp based scanline reporting */
        adjusted_mode->private_flags |=
                        I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
index ec3602ebbc1cd1e87da13c9c909078927ccf2287..54011df8c2e807d7984dc7985764525899ff49d7 100644 (file)
@@ -71,7 +71,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
        if (disable_partial)
                ipu_plane_disable(ipu_crtc->plane[1], true);
        if (disable_full)
-               ipu_plane_disable(ipu_crtc->plane[0], false);
+               ipu_plane_disable(ipu_crtc->plane[0], true);
 }
 
 static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
index 22e68a100e7beeaf752efde0235dddc7e13b9683..5d333138f9136b6e8b98238a9851085dd669c236 100644 (file)
@@ -662,13 +662,11 @@ static unsigned int mt8173_calculate_factor(int clock)
 static unsigned int mt2701_calculate_factor(int clock)
 {
        if (clock <= 64000)
-               return 16;
-       else if (clock <= 128000)
-               return 8;
-       else if (clock <= 256000)
                return 4;
-       else
+       else if (clock <= 128000)
                return 2;
+       else
+               return 1;
 }
 
 static const struct mtk_dpi_conf mt8173_conf = {
index cf59ea9bccfdf659ca042df67cf437a5da4a1078..57ce4708ef1b9a420cd9862d3c7b7d624a7efa3f 100644 (file)
@@ -15,6 +15,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_of.h>
@@ -341,6 +342,8 @@ static struct drm_driver mtk_drm_driver = {
        .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
        .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
        .gem_prime_mmap = mtk_drm_gem_mmap_buf,
+       .gem_prime_vmap = mtk_drm_gem_prime_vmap,
+       .gem_prime_vunmap = mtk_drm_gem_prime_vunmap,
        .fops = &mtk_drm_fops,
 
        .name = DRIVER_NAME,
@@ -376,6 +379,10 @@ static int mtk_drm_bind(struct device *dev)
        if (ret < 0)
                goto err_deinit;
 
+       ret = drm_fbdev_generic_setup(drm, 32);
+       if (ret)
+               DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
+
        return 0;
 
 err_deinit:
index 259b7b0de1d22d7beb052c19e2ef06e4859afcff..38483e9ee071223228e3b9bc493dc27799390411 100644 (file)
@@ -241,3 +241,49 @@ err_gem_free:
        kfree(mtk_gem);
        return ERR_PTR(ret);
 }
+
+void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+       struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+       struct sg_table *sgt;
+       struct sg_page_iter iter;
+       unsigned int npages;
+       unsigned int i = 0;
+
+       if (mtk_gem->kvaddr)
+               return mtk_gem->kvaddr;
+
+       sgt = mtk_gem_prime_get_sg_table(obj);
+       if (IS_ERR(sgt))
+               return NULL;
+
+       npages = obj->size >> PAGE_SHIFT;
+       mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
+       if (!mtk_gem->pages)
+               goto out;
+
+       for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
+               mtk_gem->pages[i++] = sg_page_iter_page(&iter);
+               if (i > npages)
+                       break;
+       }
+       mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
+                              pgprot_writecombine(PAGE_KERNEL));
+
+out:
+       kfree((void *)sgt);
+
+       return mtk_gem->kvaddr;
+}
+
+void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+       struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+
+       if (!mtk_gem->pages)
+               return;
+
+       vunmap(vaddr);
+       mtk_gem->kvaddr = 0;
+       kfree((void *)mtk_gem->pages);
+}
index 534639b43a1c77c24a5942c6cb627dc32389f425..c047a7ef294fd0391ef01cf15071f2d8b3e98f3f 100644 (file)
@@ -37,6 +37,7 @@ struct mtk_drm_gem_obj {
        dma_addr_t              dma_addr;
        unsigned long           dma_attrs;
        struct sg_table         *sg;
+       struct page             **pages;
 };
 
 #define to_mtk_gem_obj(x)      container_of(x, struct mtk_drm_gem_obj, base)
@@ -52,5 +53,7 @@ int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj,
 struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
                        struct dma_buf_attachment *attach, struct sg_table *sg);
+void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj);
+void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 
 #endif
index 915cc84621aeaf62516681a87d56e0e9760197d4..e04e6c293d39d189e87cb9bc1d25e4e840750857 100644 (file)
@@ -1480,7 +1480,6 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
        if (IS_ERR(regmap))
                ret = PTR_ERR(regmap);
        if (ret) {
-               ret = PTR_ERR(regmap);
                dev_err(dev,
                        "Failed to get system configuration registers: %d\n",
                        ret);
@@ -1516,6 +1515,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
        of_node_put(remote);
 
        hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
+       of_node_put(i2c_np);
        if (!hdmi->ddc_adpt) {
                dev_err(dev, "Failed to get ddc i2c adapter by node\n");
                return -EINVAL;
index 4ef9c57ffd44d4eb6db90dd8cecc0ed02de81295..5223498502c49228839fb993c4c2abd7ccc84a89 100644 (file)
@@ -15,28 +15,6 @@ static const struct phy_ops mtk_hdmi_phy_dev_ops = {
        .owner = THIS_MODULE,
 };
 
-long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
-                            unsigned long *parent_rate)
-{
-       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
-       hdmi_phy->pll_rate = rate;
-       if (rate <= 74250000)
-               *parent_rate = rate;
-       else
-               *parent_rate = rate / 2;
-
-       return rate;
-}
-
-unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
-                                      unsigned long parent_rate)
-{
-       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
-       return hdmi_phy->pll_rate;
-}
-
 void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
                             u32 bits)
 {
@@ -110,13 +88,11 @@ mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy)
                return NULL;
 }
 
-static void mtk_hdmi_phy_clk_get_ops(struct mtk_hdmi_phy *hdmi_phy,
-                                    const struct clk_ops **ops)
+static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy,
+                                     struct clk_init_data *clk_init)
 {
-       if (hdmi_phy && hdmi_phy->conf && hdmi_phy->conf->hdmi_phy_clk_ops)
-               *ops = hdmi_phy->conf->hdmi_phy_clk_ops;
-       else
-               dev_err(hdmi_phy->dev, "Failed to get clk ops of phy\n");
+       clk_init->flags = hdmi_phy->conf->flags;
+       clk_init->ops = hdmi_phy->conf->hdmi_phy_clk_ops;
 }
 
 static int mtk_hdmi_phy_probe(struct platform_device *pdev)
@@ -129,7 +105,6 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
        struct clk_init_data clk_init = {
                .num_parents = 1,
                .parent_names = (const char * const *)&ref_clk_name,
-               .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
        };
 
        struct phy *phy;
@@ -167,7 +142,7 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
        hdmi_phy->dev = dev;
        hdmi_phy->conf =
                (struct mtk_hdmi_phy_conf *)of_device_get_match_data(dev);
-       mtk_hdmi_phy_clk_get_ops(hdmi_phy, &clk_init.ops);
+       mtk_hdmi_phy_clk_get_data(hdmi_phy, &clk_init);
        hdmi_phy->pll_hw.init = &clk_init;
        hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
        if (IS_ERR(hdmi_phy->pll)) {
index f39b1fc66612944c9b76b8b20f87ef561a0e595b..2d8b3182470dc465b29111e00ee2c9229e4f7c0d 100644 (file)
@@ -21,6 +21,7 @@ struct mtk_hdmi_phy;
 
 struct mtk_hdmi_phy_conf {
        bool tz_disabled;
+       unsigned long flags;
        const struct clk_ops *hdmi_phy_clk_ops;
        void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
        void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
@@ -48,10 +49,6 @@ void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
 void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
                       u32 val, u32 mask);
 struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw);
-long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
-                            unsigned long *parent_rate);
-unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
-                                      unsigned long parent_rate);
 
 extern struct platform_driver mtk_hdmi_phy_driver;
 extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf;
index fcc42dc6ea7fb81d1005239a52f81bc020b2f57a..d3cc4022e98844601b82928020ec7cf0cfaf1004 100644 (file)
@@ -79,7 +79,6 @@ static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
        usleep_range(80, 100);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
@@ -94,7 +93,6 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
        usleep_range(80, 100);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
@@ -108,6 +106,12 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
        usleep_range(80, 100);
 }
 
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+                                   unsigned long *parent_rate)
+{
+       return rate;
+}
+
 static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
                                 unsigned long parent_rate)
 {
@@ -116,13 +120,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
 
        if (rate <= 64000000)
                pos_div = 3;
-       else if (rate <= 12800000)
-               pos_div = 1;
+       else if (rate <= 128000000)
+               pos_div = 2;
        else
                pos_div = 1;
 
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_PREDIV_MASK);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
        mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IC),
                          RG_HTPLL_IC_MASK);
        mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IR),
@@ -154,6 +159,39 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
        return 0;
 }
 
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+       unsigned long out_rate, val;
+
+       val = (readl(hdmi_phy->regs + HDMI_CON6)
+              & RG_HTPLL_PREDIV_MASK) >> RG_HTPLL_PREDIV;
+       switch (val) {
+       case 0x00:
+               out_rate = parent_rate;
+               break;
+       case 0x01:
+               out_rate = parent_rate / 2;
+               break;
+       default:
+               out_rate = parent_rate / 4;
+               break;
+       }
+
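+       /* The feedback divider scales the rate by 2 * (FBKDIV + 1) */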
+       val = (readl(hdmi_phy->regs + HDMI_CON6)
+              & RG_HTPLL_FBKDIV_MASK) >> RG_HTPLL_FBKDIV;
+       out_rate *= (val + 1) * 2;
+       val = (readl(hdmi_phy->regs + HDMI_CON2)
+              & RG_HDMITX_TX_POSDIV_MASK);
+       out_rate >>= (val >> RG_HDMITX_TX_POSDIV);
+
+       if (readl(hdmi_phy->regs + HDMI_CON2) & RG_HDMITX_EN_TX_POSDIV)
+               out_rate /= 5;
+
+       return out_rate;
+}
+
 static const struct clk_ops mtk_hdmi_phy_pll_ops = {
        .prepare = mtk_hdmi_pll_prepare,
        .unprepare = mtk_hdmi_pll_unprepare,
@@ -174,7 +212,6 @@ static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
        usleep_range(80, 100);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
@@ -186,7 +223,6 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
        usleep_range(80, 100);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
@@ -202,6 +238,7 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
 
 struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf = {
        .tz_disabled = true,
+       .flags = CLK_SET_RATE_GATE,
        .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
        .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
        .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
index ed5916b2765843b09056f0788ce93d190e5a0367..47f8a295168224b525959bafb8f0e631ee799b2b 100644 (file)
@@ -199,6 +199,20 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
        usleep_range(100, 150);
 }
 
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+                                   unsigned long *parent_rate)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+       hdmi_phy->pll_rate = rate;
+       if (rate <= 74250000)
+               *parent_rate = rate;
+       else
+               *parent_rate = rate / 2;
+
+       return rate;
+}
+
 static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
                                 unsigned long parent_rate)
 {
@@ -285,6 +299,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
        return 0;
 }
 
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+       return hdmi_phy->pll_rate;
+}
+
 static const struct clk_ops mtk_hdmi_phy_pll_ops = {
        .prepare = mtk_hdmi_pll_prepare,
        .unprepare = mtk_hdmi_pll_unprepare,
@@ -309,6 +331,7 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
 }
 
 struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf = {
+       .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
        .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
        .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
        .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
index 2281ed3eb7747757620288069f32d48a53b9ea15..8a4ebcb6405cee2427d0889ea49a0d871d2cc5ba 100644 (file)
@@ -337,12 +337,14 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 
        ret = drm_dev_register(drm, 0);
        if (ret)
-               goto free_drm;
+               goto uninstall_irq;
 
        drm_fbdev_generic_setup(drm, 32);
 
        return 0;
 
+uninstall_irq:
+       drm_irq_uninstall(drm);
 free_drm:
        drm_dev_put(drm);
 
@@ -356,8 +358,8 @@ static int meson_drv_bind(struct device *dev)
 
 static void meson_drv_unbind(struct device *dev)
 {
-       struct drm_device *drm = dev_get_drvdata(dev);
-       struct meson_drm *priv = drm->dev_private;
+       struct meson_drm *priv = dev_get_drvdata(dev);
+       struct drm_device *drm = priv->drm;
 
        if (priv->canvas) {
                meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
@@ -367,6 +369,7 @@ static void meson_drv_unbind(struct device *dev)
        }
 
        drm_dev_unregister(drm);
+       drm_irq_uninstall(drm);
        drm_kms_helper_poll_fini(drm);
        drm_mode_config_cleanup(drm);
        drm_dev_put(drm);
index e28814f4ea6cd2e05724ee46a0892b261d3d4cef..563953ec6ad03fd904c2e5c38de8cbe1dc2edce0 100644 (file)
@@ -569,7 +569,8 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
        DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
 
        /* If the mode clock exceeds the sink's max TMDS clock, reject the mode */
-       if (mode->clock > connector->display_info.max_tmds_clock)
+       if (connector->display_info.max_tmds_clock &&
+           mode->clock > connector->display_info.max_tmds_clock)
                return MODE_BAD;
 
        /* Check against non-VIC supported modes */
index 88a52f6b39fe333df24c33dce9aef2535d6a1b09..7dfbbbc1beea6ad1f5fa10cd535bffa130badebe 100644 (file)
@@ -181,7 +181,7 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
        }
 
        ret = pm_runtime_get_sync(drm->dev);
-       if (IS_ERR_VALUE(ret) && ret != -EACCES)
+       if (ret < 0 && ret != -EACCES)
                return ret;
        ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
        pm_runtime_put_autosuspend(drm->dev);
index aa9fec80492d167f720a07ee58f8e0196d858c3a..40c47d6a7d783d72c869937b2a6ad946086a20cf 100644 (file)
@@ -100,12 +100,10 @@ static void
 nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page)
 {
        struct nouveau_dmem_chunk *chunk;
-       struct nouveau_drm *drm;
        unsigned long idx;
 
        chunk = (void *)hmm_devmem_page_get_drvdata(page);
        idx = page_to_pfn(page) - chunk->pfn_first;
-       drm = chunk->drm;
 
        /*
         * FIXME:
@@ -456,11 +454,6 @@ nouveau_dmem_resume(struct nouveau_drm *drm)
                /* FIXME handle pin failure */
                WARN_ON(ret);
        }
-       list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
-               ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
-               /* FIXME handle pin failure */
-               WARN_ON(ret);
-       }
        mutex_unlock(&drm->dmem->mutex);
 }
 
@@ -479,9 +472,6 @@ nouveau_dmem_suspend(struct nouveau_drm *drm)
        list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
                nouveau_bo_unpin(chunk->bo);
        }
-       list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
-               nouveau_bo_unpin(chunk->bo);
-       }
        mutex_unlock(&drm->dmem->mutex);
 }
 
@@ -623,7 +613,7 @@ nouveau_dmem_init(struct nouveau_drm *drm)
         */
        drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops,
                                           device, size);
-       if (drm->dmem->devmem == NULL) {
+       if (IS_ERR(drm->dmem->devmem)) {
                kfree(drm->dmem);
                drm->dmem = NULL;
                return;
index 340383150fb98d24567b6ca74cae0298cda806b6..ebf9c96d43eee56649e510a5ca8c53a045b10c67 100644 (file)
@@ -175,6 +175,7 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
                REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0, 3, 3);
                hdmi_wp_clear_irqenable(core->wp, HDMI_IRQ_CORE);
                hdmi_wp_set_irqstatus(core->wp, HDMI_IRQ_CORE);
+               REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
                hdmi4_core_disable(core);
                return 0;
        }
@@ -182,16 +183,24 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
        if (err)
                return err;
 
+       /*
+        * Initialize the CEC clock divider: CEC needs a 2 MHz clock, so
+        * set the divider to 24 to derive 48/24 = 2 MHz.
+        */
+       REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
+
        /* Clear TX FIFO */
        if (!hdmi_cec_clear_tx_fifo(adap)) {
                pr_err("cec-%s: could not clear TX FIFO\n", adap->name);
-               return -EIO;
+               err = -EIO;
+               goto err_disable_clk;
        }
 
        /* Clear RX FIFO */
        if (!hdmi_cec_clear_rx_fifo(adap)) {
                pr_err("cec-%s: could not clear RX FIFO\n", adap->name);
-               return -EIO;
+               err = -EIO;
+               goto err_disable_clk;
        }
 
        /* Clear CEC interrupts */
@@ -236,6 +245,12 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
                hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, temp);
        }
        return 0;
+
+err_disable_clk:
+       REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
+       hdmi4_core_disable(core);
+
+       return err;
 }
 
 static int hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
@@ -333,11 +348,8 @@ int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
                return ret;
        core->wp = wp;
 
-       /*
-        * Initialize CEC clock divider: CEC needs 2MHz clock hence
-        * set the devider to 24 to get 48/24=2MHz clock
-        */
-       REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
+       /* Disable clock initially, hdmi_cec_adap_enable() manages it */
+       REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0);
 
        ret = cec_register_adapter(core->adap, &pdev->dev);
        if (ret < 0) {
index 813ba42f27539ce94b85afc20295e411f6b4c123..e384b95ad8573a7ef9ad9bb9a631432eda4a726f 100644 (file)
@@ -708,7 +708,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
        else
                acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
        /*
-        * The I2S input word length is twice the lenght given in the IEC-60958
+        * The I2S input word length is twice the length given in the IEC-60958
         * status word. If the word size is greater than
         * 20 bits, increment by one.
         */
index 578d867a81d59aa476d56693d7f51399f3065dee..f33e349c4ec5b4f48db8edd7b69d11f0e4a3c83a 100644 (file)
@@ -255,10 +255,14 @@ static struct drm_driver qxl_driver = {
 #if defined(CONFIG_DEBUG_FS)
        .debugfs_init = qxl_debugfs_init,
 #endif
+       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export = drm_gem_prime_export,
        .gem_prime_import = drm_gem_prime_import,
        .gem_prime_pin = qxl_gem_prime_pin,
        .gem_prime_unpin = qxl_gem_prime_unpin,
+       .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
+       .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
        .gem_prime_vmap = qxl_gem_prime_vmap,
        .gem_prime_vunmap = qxl_gem_prime_vunmap,
        .gem_prime_mmap = qxl_gem_prime_mmap,
index 8b448eca1cd996a4ffba1931e369b9ba02fc4ad6..114653b471c6a8969174ec4759c5c0cb7d9a46e7 100644 (file)
@@ -42,6 +42,18 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj)
        qxl_bo_unpin(bo);
 }
 
+struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
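+       /* sg-table export is not implemented for qxl; PRIME callers get -ENOSYS */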
+       return ERR_PTR(-ENOSYS);
+}
+
+struct drm_gem_object *qxl_gem_prime_import_sg_table(
+       struct drm_device *dev, struct dma_buf_attachment *attach,
+       struct sg_table *table)
+{
+       return ERR_PTR(-ENOSYS);
+}
+
 void *qxl_gem_prime_vmap(struct drm_gem_object *obj)
 {
        struct qxl_bo *bo = gem_to_qxl_bo(obj);
index c7d4c6073ea59b70c56559288def3fb7fd6fe215..0d4ade9d4722c340b706b82d7ea7bb587db5f293 100644 (file)
@@ -541,6 +541,18 @@ static void vop_core_clks_disable(struct vop *vop)
        clk_disable(vop->hclk);
 }
 
+static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
+{
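+       /* Reset the extended scaler modes as well, so stale scaling state cannot leak into the next enable */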
+       if (win->phy->scl && win->phy->scl->ext) {
+               VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
+               VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
+               VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
+               VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
+       }
+
+       VOP_WIN_SET(vop, win, enable, 0);
+}
+
 static int vop_enable(struct drm_crtc *crtc)
 {
        struct vop *vop = to_vop(crtc);
@@ -586,7 +598,7 @@ static int vop_enable(struct drm_crtc *crtc)
                struct vop_win *vop_win = &vop->win[i];
                const struct vop_win_data *win = vop_win->data;
 
-               VOP_WIN_SET(vop, win, enable, 0);
+               vop_win_disable(vop, win);
        }
        spin_unlock(&vop->reg_lock);
 
@@ -735,7 +747,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
 
        spin_lock(&vop->reg_lock);
 
-       VOP_WIN_SET(vop, win, enable, 0);
+       vop_win_disable(vop, win);
 
        spin_unlock(&vop->reg_lock);
 }
@@ -1622,7 +1634,7 @@ static int vop_initial(struct vop *vop)
                int channel = i * 2 + 1;
 
                VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
-               VOP_WIN_SET(vop, win, enable, 0);
+               vop_win_disable(vop, win);
                VOP_WIN_SET(vop, win, gate, 1);
        }
 
index 19fc601c9eeb52fc9704bbfc7c164dbfa71b7717..a1bec2779e76220c8568f5c78a7345ef2f7c3d36 100644 (file)
@@ -366,10 +366,9 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
 EXPORT_SYMBOL(drm_sched_increase_karma);
 
 /**
- * drm_sched_hw_job_reset - stop the scheduler if it contains the bad job
+ * drm_sched_stop - stop the scheduler
  *
  * @sched: scheduler instance
- * @bad: bad scheduler job
  *
  */
 void drm_sched_stop(struct drm_gpu_scheduler *sched)
index 3ebd9f5e2719d7f028c2c87b1e2cedd6c60a5365..29258b404e549fbd31d67ec164c3bc852a10eb14 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/of_reserved_mem.h>
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
@@ -85,6 +86,8 @@ static int sun4i_drv_bind(struct device *dev)
                ret = -ENOMEM;
                goto free_drm;
        }
+
+       dev_set_drvdata(dev, drm);
        drm->dev_private = drv;
        INIT_LIST_HEAD(&drv->frontend_list);
        INIT_LIST_HEAD(&drv->engine_list);
@@ -144,8 +147,12 @@ static void sun4i_drv_unbind(struct device *dev)
 
        drm_dev_unregister(drm);
        drm_kms_helper_poll_fini(drm);
+       drm_atomic_helper_shutdown(drm);
        drm_mode_config_cleanup(drm);
+
+       component_unbind_all(dev, NULL);
        of_reserved_mem_device_release(dev);
+
        drm_dev_put(drm);
 }
 
@@ -395,6 +402,8 @@ static int sun4i_drv_probe(struct platform_device *pdev)
 
 static int sun4i_drv_remove(struct platform_device *pdev)
 {
+       component_master_del(&pdev->dev, &sun4i_drv_master_ops);
+
        return 0;
 }
 
index dc47720c99ba5689a8c12f217c7859960bfa394d..39d8509d96a0d3162f8bd7c310f4aa0a18898f6b 100644 (file)
@@ -48,8 +48,13 @@ static enum drm_mode_status
 sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector,
                            const struct drm_display_mode *mode)
 {
-       /* This is max for HDMI 2.0b (4K@60Hz) */
-       if (mode->clock > 594000)
+       /*
+        * The controller supports a maximum of 594 MHz, which corresponds
+        * to 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than
+        * 340 MHz, scrambling has to be enabled. Because scrambling is
+        * not yet implemented, just limit to 340 MHz for now.
+        */
+       if (mode->clock > 340000)
                return MODE_CLOCK_HIGH;
 
        return MODE_OK;
index fc36e0c10a374a2a33a054a4102944dfb0b03009..b1e7c76e9c17269664fddd5ab5c90c3477b80a0c 100644 (file)
@@ -227,7 +227,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
 
 err_unregister_gates:
        for (i = 0; i < CLK_NUM; i++)
-               if (clk_data->hws[i])
+               if (!IS_ERR_OR_NULL(clk_data->hws[i]))
                        clk_hw_unregister_gate(clk_data->hws[i]);
        clk_disable_unprepare(tcon_top->bus);
 err_assert_reset:
@@ -245,7 +245,8 @@ static void sun8i_tcon_top_unbind(struct device *dev, struct device *master,
 
        of_clk_del_provider(dev->of_node);
        for (i = 0; i < CLK_NUM; i++)
-               clk_hw_unregister_gate(clk_data->hws[i]);
+               if (clk_data->hws[i])
+                       clk_hw_unregister_gate(clk_data->hws[i]);
 
        clk_disable_unprepare(tcon_top->bus);
        reset_control_assert(tcon_top->rst);
index 47c55974756d576b71193219b92d976078006b4e..d23c4bfde790ca0864722e8b1eb2907b7c9ee24f 100644 (file)
@@ -1260,9 +1260,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
 
        hdmi->dvi = !tegra_output_is_hdmi(output);
        if (!hdmi->dvi) {
-               err = tegra_hdmi_setup_audio(hdmi);
-               if (err < 0)
-                       hdmi->dvi = true;
+               /*
+                * Make sure that the audio format has been configured before
+                * enabling audio, otherwise we may try to divide by zero.
+                */
+               if (hdmi->format.sample_rate > 0) {
+                       err = tegra_hdmi_setup_audio(hdmi);
+                       if (err < 0)
+                               hdmi->dvi = true;
+               }
        }
 
        if (hdmi->config->has_hda)
index ba9b3cfb8c3d247fae80f8026cc520936e5b954c..b3436c2aed6892b585ca221a9ac711027350310e 100644 (file)
@@ -378,14 +378,16 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
 static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
                                              struct drm_plane_state *old_state)
 {
-       struct tegra_dc *dc = to_tegra_dc(old_state->crtc);
        struct tegra_plane *p = to_tegra_plane(plane);
+       struct tegra_dc *dc;
        u32 value;
 
        /* rien ne va plus (no more bets) */
        if (!old_state || !old_state->crtc)
                return;
 
+       dc = to_tegra_dc(old_state->crtc);
+
        /*
         * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
         * on planes that are already disabled. Make sure we fallback to the
index 39bfed9623de28f0e62a0297f8e84b7151c28238..982ce37ecde1b0c9fc6ef07c9819b98541248151 100644 (file)
@@ -106,6 +106,7 @@ static int vic_boot(struct vic *vic)
        if (vic->booted)
                return 0;
 
+#ifdef CONFIG_IOMMU_API
        if (vic->config->supports_sid) {
                struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
                u32 value;
@@ -121,6 +122,7 @@ static int vic_boot(struct vic *vic)
                        vic_writel(vic, value, VIC_THI_STREAMID1);
                }
        }
+#endif
 
        /* setup clockgating registers */
        vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
index 3f56647cdb35f94ddcead862b286516ad903150e..1a01669b159ab78c0c9b849616bcc7d852738b77 100644 (file)
@@ -49,9 +49,8 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj);
  * ttm_global_mutex - protecting the global BO state
  */
 DEFINE_MUTEX(ttm_global_mutex);
-struct ttm_bo_global ttm_bo_glob = {
-       .use_count = 0
-};
+unsigned ttm_bo_glob_use_count;
+struct ttm_bo_global ttm_bo_glob;
 
 static struct attribute ttm_bo_count = {
        .name = "bo_count",
@@ -876,8 +875,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
                reservation_object_add_shared_fence(bo->resv, fence);
 
                ret = reservation_object_reserve_shared(bo->resv, 1);
-               if (unlikely(ret))
+               if (unlikely(ret)) {
+                       dma_fence_put(fence);
                        return ret;
+               }
 
                dma_fence_put(bo->moving);
                bo->moving = fence;
@@ -1529,12 +1530,13 @@ static void ttm_bo_global_release(void)
        struct ttm_bo_global *glob = &ttm_bo_glob;
 
        mutex_lock(&ttm_global_mutex);
-       if (--glob->use_count > 0)
+       if (--ttm_bo_glob_use_count > 0)
                goto out;
 
        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
        ttm_mem_global_release(&ttm_mem_glob);
+       memset(glob, 0, sizeof(*glob));
 out:
        mutex_unlock(&ttm_global_mutex);
 }
@@ -1546,7 +1548,7 @@ static int ttm_bo_global_init(void)
        unsigned i;
 
        mutex_lock(&ttm_global_mutex);
-       if (++glob->use_count > 1)
+       if (++ttm_bo_glob_use_count > 1)
                goto out;
 
        ret = ttm_mem_global_init(&ttm_mem_glob);
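
[Editor's note: the use count moves out of ttm_bo_glob so that the release path can memset() the structure back to a pristine state without wiping the counter that guards it. A reduced sketch of the pattern, with hypothetical example_* names:]

    static DEFINE_MUTEX(example_mutex);
    static unsigned int example_use_count;      /* lives outside the state */
    static struct example_state example_state;  /* zeroed between uses */

    static int example_init(void)
    {
            int ret = 0;

            mutex_lock(&example_mutex);
            if (++example_use_count == 1)
                    ret = example_setup(&example_state);   /* hypothetical */
            mutex_unlock(&example_mutex);
            return ret;
    }

    static void example_release(void)
    {
            mutex_lock(&example_mutex);
            if (--example_use_count == 0) {
                    example_teardown(&example_state);      /* hypothetical */
                    memset(&example_state, 0, sizeof(example_state));
            }
            mutex_unlock(&example_mutex);
    }
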
index f1567c353b543a3376c6b64ab5fb6c4550f91b23..9a0909decb3668ee1e56a729c7664f3f01a33a72 100644 (file)
@@ -461,8 +461,8 @@ out_no_zone:
 
 void ttm_mem_global_release(struct ttm_mem_global *glob)
 {
-       unsigned int i;
        struct ttm_mem_zone *zone;
+       unsigned int i;
 
        /* let the page allocator first stop the shrink work. */
        ttm_page_alloc_fini();
@@ -475,9 +475,10 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
                zone = glob->zones[i];
                kobject_del(&zone->kobj);
                kobject_put(&zone->kobj);
-                       }
+       }
        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
+       memset(glob, 0, sizeof(*glob));
 }
 
 static void ttm_check_swapping(struct ttm_mem_global *glob)
index f841accc2c0064a3edd865423a10818480477f39..627f8dc91d0ed23e0958dfc39d106c967dcd376a 100644 (file)
@@ -730,9 +730,10 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
                        }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-                       if (!(flags & TTM_PAGE_FLAG_DMA32)) {
-                               for (j = 0; j < HPAGE_PMD_NR; ++j)
-                                       if (p++ != pages[i + j])
+                       if (!(flags & TTM_PAGE_FLAG_DMA32) &&
+                           (npages - i) >= HPAGE_PMD_NR) {
+                               for (j = 1; j < HPAGE_PMD_NR; ++j)
+                                       if (++p != pages[i + j])
                                            break;
 
                                if (j == HPAGE_PMD_NR)
@@ -759,15 +760,15 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
                unsigned max_size, n2free;
 
                spin_lock_irqsave(&huge->lock, irq_flags);
-               while (i < npages) {
+               while ((npages - i) >= HPAGE_PMD_NR) {
                        struct page *p = pages[i];
                        unsigned j;
 
                        if (!p)
                                break;
 
-                       for (j = 0; j < HPAGE_PMD_NR; ++j)
-                               if (p++ != pages[i + j])
+                       for (j = 1; j < HPAGE_PMD_NR; ++j)
+                               if (++p != pages[i + j])
                                    break;
 
                        if (j != HPAGE_PMD_NR)
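
[Editor's note: the contiguity walk now begins at j = 1 with a pre-incremented candidate pointer, and only runs when at least HPAGE_PMD_NR entries remain in the array. A standalone sketch of the check, with a hypothetical name:]

    /* Return true if pages[i] .. pages[i + count - 1] are consecutive
     * struct page pointers, i.e. each entry directly follows the
     * previous one. The caller must ensure 'count' entries remain.
     */
    static bool example_pages_contiguous(struct page **pages,
                                         unsigned int i, unsigned int count)
    {
            struct page *p = pages[i];
            unsigned int j;

            for (j = 1; j < count; ++j)
                    if (++p != pages[i + j])
                            return false;

            return true;
    }
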
index 66885c24590f0147ce1a510991a546c4f2bbe427..c1bd5e3d9e4aee80bb185cc38307fb389fe54c2f 100644 (file)
 #include "udl_connector.h"
 #include "udl_drv.h"
 
-static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
-                                                          u8 *buff)
+static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
+                              size_t len)
 {
        int ret, i;
        u8 *read_buff;
+       struct udl_device *udl = data;
 
        read_buff = kmalloc(2, GFP_KERNEL);
        if (!read_buff)
-               return false;
+               return -1;
 
-       for (i = 0; i < EDID_LENGTH; i++) {
-               int bval = (i + block_idx * EDID_LENGTH) << 8;
+       for (i = 0; i < len; i++) {
+               int bval = (i + block * EDID_LENGTH) << 8;
                ret = usb_control_msg(udl->udev,
                                      usb_rcvctrlpipe(udl->udev, 0),
                                          (0x02), (0x80 | (0x02 << 5)), bval,
@@ -37,60 +38,13 @@ static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
                if (ret < 1) {
                        DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
                        kfree(read_buff);
-                       return false;
+                       return -1;
                }
-               buff[i] = read_buff[1];
+               buf[i] = read_buff[1];
        }
 
        kfree(read_buff);
-       return true;
-}
-
-static bool udl_get_edid(struct udl_device *udl, u8 **result_buff,
-                        int *result_buff_size)
-{
-       int i, extensions;
-       u8 *block_buff = NULL, *buff_ptr;
-
-       block_buff = kmalloc(EDID_LENGTH, GFP_KERNEL);
-       if (block_buff == NULL)
-               return false;
-
-       if (udl_get_edid_block(udl, 0, block_buff) &&
-           memchr_inv(block_buff, 0, EDID_LENGTH)) {
-               extensions = ((struct edid *)block_buff)->extensions;
-               if (extensions > 0) {
-                       /* we have to read all extensions one by one */
-                       *result_buff_size = EDID_LENGTH * (extensions + 1);
-                       *result_buff = kmalloc(*result_buff_size, GFP_KERNEL);
-                       buff_ptr = *result_buff;
-                       if (buff_ptr == NULL) {
-                               kfree(block_buff);
-                               return false;
-                       }
-                       memcpy(buff_ptr, block_buff, EDID_LENGTH);
-                       kfree(block_buff);
-                       buff_ptr += EDID_LENGTH;
-                       for (i = 1; i < extensions; ++i) {
-                               if (udl_get_edid_block(udl, i, buff_ptr)) {
-                                       buff_ptr += EDID_LENGTH;
-                               } else {
-                                       kfree(*result_buff);
-                                       *result_buff = NULL;
-                                       return false;
-                               }
-                       }
-                       return true;
-               }
-               /* we have only base edid block */
-               *result_buff = block_buff;
-               *result_buff_size = EDID_LENGTH;
-               return true;
-       }
-
-       kfree(block_buff);
-
-       return false;
+       return 0;
 }
 
 static int udl_get_modes(struct drm_connector *connector)
@@ -122,8 +76,6 @@ static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
 static enum drm_connector_status
 udl_detect(struct drm_connector *connector, bool force)
 {
-       u8 *edid_buff = NULL;
-       int edid_buff_size = 0;
        struct udl_device *udl = connector->dev->dev_private;
        struct udl_drm_connector *udl_connector =
                                        container_of(connector,
@@ -136,12 +88,10 @@ udl_detect(struct drm_connector *connector, bool force)
                udl_connector->edid = NULL;
        }
 
-
-       if (!udl_get_edid(udl, &edid_buff, &edid_buff_size))
+       udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl);
+       if (!udl_connector->edid)
                return connector_status_disconnected;
 
-       udl_connector->edid = (struct edid *)edid_buff;
-       
        return connector_status_connected;
 }
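
[Editor's note: with the conversion to drm_do_get_edid(), the DRM core handles extension-block counting, allocation, and validation; the driver only supplies a block reader that fills buf with len bytes of EDID block 'block' and returns 0 on success. A minimal sketch of the callback shape, assuming a hypothetical example_read_edid() transport helper:]

    static int example_get_edid_block(void *data, u8 *buf, unsigned int block,
                                      size_t len)
    {
            struct example_device *edev = data;

            /* Fetch 'len' bytes starting at offset block * EDID_LENGTH;
             * any non-zero return tells the DRM core the read failed.
             */
            if (example_read_edid(edev, block * EDID_LENGTH, buf, len))
                    return -EIO;

            return 0;
    }

    /* Typical call site:
     *     edid = drm_do_get_edid(connector, example_get_edid_block, edev);
     */
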
 
index 22cd2d13e272f033d3e54b9245986ce22fa74486..ff47f890e6ad8d554fa7180aab449321a34ce5c1 100644 (file)
@@ -52,6 +52,7 @@ static struct drm_driver driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
        .load = udl_driver_load,
        .unload = udl_driver_unload,
+       .release = udl_driver_release,
 
        /* gem hooks */
        .gem_free_object_unlocked = udl_gem_free_object,
index e9e9b1ff678ee0a81d0d4b100b816b19122c7f0c..4ae67d882eae928e6b39fb4f240a46bfc272ed15 100644 (file)
@@ -104,6 +104,7 @@ void udl_urb_completion(struct urb *urb);
 
 int udl_driver_load(struct drm_device *dev, unsigned long flags);
 void udl_driver_unload(struct drm_device *dev);
+void udl_driver_release(struct drm_device *dev);
 
 int udl_fbdev_init(struct drm_device *dev);
 void udl_fbdev_cleanup(struct drm_device *dev);
index d5a23295dd80c1a9c1f2cc202c4c93048fc163ef..bb7b58407039bbbb099a371b9a432dc12983f886 100644 (file)
@@ -224,7 +224,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
        *offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
 
 out:
-       drm_gem_object_put(&gobj->base);
+       drm_gem_object_put_unlocked(&gobj->base);
 unlock:
        mutex_unlock(&udl->gem_lock);
        return ret;
index 9086d0d1b880de87de7609e55798e534b5cb3039..1f8ef34ade24365bce4f522104be8347eafb1586 100644 (file)
@@ -379,6 +379,12 @@ void udl_driver_unload(struct drm_device *dev)
                udl_free_urb_list(dev);
 
        udl_fbdev_cleanup(dev);
-       udl_modeset_cleanup(dev);
        kfree(udl);
 }
+
+void udl_driver_release(struct drm_device *dev)
+{
+       udl_modeset_cleanup(dev);
+       drm_dev_fini(dev);
+       kfree(dev);
+}
index 730008d3da761e2eb37d9821c518672567a6e1c6..1baa10e9448472510006b7390e3e574841c0163a 100644 (file)
@@ -1042,7 +1042,7 @@ static void
 vc4_crtc_reset(struct drm_crtc *crtc)
 {
        if (crtc->state)
-               __drm_atomic_helper_crtc_destroy_state(crtc->state);
+               vc4_crtc_destroy_state(crtc, crtc->state);
 
        crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
        if (crtc->state)
index 5930facd6d2d85cca81cb9c1f5247a6be3632546..11a8f99ba18c5f007734abef1003cc44d5e778a1 100644 (file)
@@ -191,13 +191,9 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
        ret = drm_gem_handle_create(file, &obj->base, handle);
        drm_gem_object_put_unlocked(&obj->base);
        if (ret)
-               goto err;
+               return ERR_PTR(ret);
 
        return &obj->base;
-
-err:
-       __vgem_gem_destroy(obj);
-       return ERR_PTR(ret);
 }
 
 static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
index b996ac1d4fcc9cb3ca1ff86cad8be0c364c80c30..af92964b6889dd0dbfaadac5558cb27ee78b3d56 100644 (file)
@@ -205,10 +205,14 @@ static struct drm_driver driver = {
 #if defined(CONFIG_DEBUG_FS)
        .debugfs_init = virtio_gpu_debugfs_init,
 #endif
+       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export = drm_gem_prime_export,
        .gem_prime_import = drm_gem_prime_import,
        .gem_prime_pin = virtgpu_gem_prime_pin,
        .gem_prime_unpin = virtgpu_gem_prime_unpin,
+       .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
+       .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
        .gem_prime_vmap = virtgpu_gem_prime_vmap,
        .gem_prime_vunmap = virtgpu_gem_prime_vunmap,
        .gem_prime_mmap = virtgpu_gem_prime_mmap,
index 3238fdf58eb480ed9447d0639aaded9a88d28dcc..d577cb76f5ad6b66d26124284159c82706f44699 100644 (file)
@@ -354,6 +354,10 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
 /* virtgpu_prime.c */
 int virtgpu_gem_prime_pin(struct drm_gem_object *obj);
 void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
+struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
+       struct drm_device *dev, struct dma_buf_attachment *attach,
+       struct sg_table *sgt);
 void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
 void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
index c59ec34c80a5df2c6b3a91f7ec73cd05f85445ad..eb51a78e11991c01cce73d34cf74907cf9202764 100644 (file)
@@ -39,6 +39,18 @@ void virtgpu_gem_prime_unpin(struct drm_gem_object *obj)
        WARN_ONCE(1, "not implemented");
 }
 
+struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+       return ERR_PTR(-ENODEV);
+}
+
+struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
+       struct drm_device *dev, struct dma_buf_attachment *attach,
+       struct sg_table *table)
+{
+       return ERR_PTR(-ENODEV);
+}
+
 void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
 {
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
index 138b0bb325cf9662cd59b5a54158947dc691a2d9..69048e73377dc97855aa3b71491008e5993a5304 100644 (file)
@@ -111,11 +111,8 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
 
        ret = drm_gem_handle_create(file, &obj->gem, handle);
        drm_gem_object_put_unlocked(&obj->gem);
-       if (ret) {
-               drm_gem_object_release(&obj->gem);
-               kfree(obj);
+       if (ret)
                return ERR_PTR(ret);
-       }
 
        return &obj->gem;
 }
index 6165fe2c4504de07d0626c92892293db75d7354c..1bfa353d995cf5bb7ca4c4d1ce8a46ba686e55d6 100644 (file)
@@ -545,30 +545,14 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
        dev_priv->initial_height = height;
 }
 
-/**
- * vmw_assume_iommu - Figure out whether coherent dma-remapping might be
- * taking place.
- * @dev: Pointer to the struct drm_device.
- *
- * Return: true if iommu present, false otherwise.
- */
-static bool vmw_assume_iommu(struct drm_device *dev)
-{
-       const struct dma_map_ops *ops = get_dma_ops(dev->dev);
-
-       return !dma_is_direct(ops) && ops &&
-               ops->map_page != dma_direct_map_page;
-}
-
 /**
  * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
  * system.
  *
  * @dev_priv: Pointer to a struct vmw_private
  *
- * This functions tries to determine the IOMMU setup and what actions
- * need to be taken by the driver to make system pages visible to the
- * device.
+ * This function tries to determine what actions need to be taken by the
+ * driver to make system pages visible to the device.
  * If this function decides that DMA is not possible, it returns -EINVAL.
  * The driver may then try to disable features of the device that require
  * DMA.
@@ -578,23 +562,16 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
        static const char *names[vmw_dma_map_max] = {
                [vmw_dma_phys] = "Using physical TTM page addresses.",
                [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
-               [vmw_dma_map_populate] = "Keeping DMA mappings.",
+               [vmw_dma_map_populate] = "Caching DMA mappings.",
                [vmw_dma_map_bind] = "Giving up DMA mappings early."};
 
        if (vmw_force_coherent)
                dev_priv->map_mode = vmw_dma_alloc_coherent;
-       else if (vmw_assume_iommu(dev_priv->dev))
-               dev_priv->map_mode = vmw_dma_map_populate;
-       else if (!vmw_force_iommu)
-               dev_priv->map_mode = vmw_dma_phys;
-       else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl())
-               dev_priv->map_mode = vmw_dma_alloc_coherent;
+       else if (vmw_restrict_iommu)
+               dev_priv->map_mode = vmw_dma_map_bind;
        else
                dev_priv->map_mode = vmw_dma_map_populate;
 
-       if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu)
-               dev_priv->map_mode = vmw_dma_map_bind;
-
        /* No TTM coherent page pool? FIXME: Ask TTM instead! */
         if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
            (dev_priv->map_mode == vmw_dma_alloc_coherent))
index b913a56f3426669f21582e271fac9add830bb91d..2a9112515f464c320628d64b8a9d92c645f730dd 100644 (file)
@@ -564,11 +564,9 @@ static int vmw_fb_set_par(struct fb_info *info)
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
        };
-       struct drm_display_mode *old_mode;
        struct drm_display_mode *mode;
        int ret;
 
-       old_mode = par->set_mode;
        mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
        if (!mode) {
                DRM_ERROR("Could not create new fb mode.\n");
@@ -579,11 +577,7 @@ static int vmw_fb_set_par(struct fb_info *info)
        mode->vdisplay = var->yres;
        vmw_guess_mode_timing(mode);
 
-       if (old_mode && drm_mode_equal(old_mode, mode)) {
-               drm_mode_destroy(vmw_priv->dev, mode);
-               mode = old_mode;
-               old_mode = NULL;
-       } else if (!vmw_kms_validate_mode_vram(vmw_priv,
+       if (!vmw_kms_validate_mode_vram(vmw_priv,
                                        mode->hdisplay *
                                        DIV_ROUND_UP(var->bits_per_pixel, 8),
                                        mode->vdisplay)) {
@@ -620,8 +614,8 @@ static int vmw_fb_set_par(struct fb_info *info)
        schedule_delayed_work(&par->local_work, 0);
 
 out_unlock:
-       if (old_mode)
-               drm_mode_destroy(vmw_priv->dev, old_mode);
+       if (par->set_mode)
+               drm_mode_destroy(vmw_priv->dev, par->set_mode);
        par->set_mode = mode;
 
        mutex_unlock(&par->bo_mutex);
index b93c558dd86e0121741284becc87434a27b39d2a..7da752ca1c34bd06497e1491d264921c33011c80 100644 (file)
@@ -57,7 +57,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
 
        id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
        if (id < 0)
-               return id;
+               return (id != -ENOMEM ? 0 : id);
 
        spin_lock(&gman->lock);
 
index 27101c04a8272668988ce5be66dfc584068f8a60..0c0eb43abf657f2369872b3f5026b3a965f4a298 100644 (file)
@@ -115,8 +115,12 @@ static inline void synchronize_syncpt_base(struct host1x_job *job)
 static void host1x_channel_set_streamid(struct host1x_channel *channel)
 {
 #if HOST1X_HW >= 6
+       u32 sid = 0x7f;
+#ifdef CONFIG_IOMMU_API
        struct iommu_fwspec *spec = dev_iommu_fwspec_get(channel->dev->parent);
-       u32 sid = spec ? spec->ids[0] & 0xffff : 0x7f;
+       if (spec)
+               sid = spec->ids[0] & 0xffff;
+#endif
 
        host1x_ch_writel(channel, sid, HOST1X_CHANNEL_SMMU_STREAMID);
 #endif
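
[Editor's note: dev_iommu_fwspec_get() is only available when CONFIG_IOMMU_API is enabled, so both call sites in this series now compile the lookup conditionally and fall back to the default stream ID. The idiom could also be factored into a helper, sketched here with a hypothetical name:]

    static u32 example_stream_id(struct device *dev)
    {
            u32 sid = 0x7f;         /* default/bypass stream ID */
    #ifdef CONFIG_IOMMU_API
            struct iommu_fwspec *spec = dev_iommu_fwspec_get(dev);

            if (spec)
                    sid = spec->ids[0] & 0xffff;
    #endif
            return sid;
    }
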
index 9b2b3fa479c462d1c4d7b8b02180ad22eb20a715..5e44ff1f20851a16afdb42bfdaf73caab97ebff5 100644 (file)
@@ -195,7 +195,8 @@ int ipu_dp_setup_channel(struct ipu_dp *dp,
                ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs,
                                DP_COM_CONF_CSC_DEF_BOTH);
        } else {
-               if (flow->foreground.in_cs == flow->out_cs)
+               if (flow->foreground.in_cs == IPUV3_COLORSPACE_UNKNOWN ||
+                   flow->foreground.in_cs == flow->out_cs)
                        /*
                         * foreground identical to output, apply color
                         * conversion on background
@@ -261,6 +262,8 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
        struct ipu_dp_priv *priv = flow->priv;
        u32 reg, csc;
 
+       dp->in_cs = IPUV3_COLORSPACE_UNKNOWN;
+
        if (!dp->foreground)
                return;
 
@@ -268,8 +271,9 @@ void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
 
        reg = readl(flow->base + DP_COM_CONF);
        csc = reg & DP_COM_CONF_CSC_DEF_MASK;
-       if (csc == DP_COM_CONF_CSC_DEF_FG)
-               reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+       reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+       if (csc == DP_COM_CONF_CSC_DEF_BOTH || csc == DP_COM_CONF_CSC_DEF_BG)
+               reg |= DP_COM_CONF_CSC_DEF_BG;
 
        reg &= ~DP_COM_CONF_FG_EN;
        writel(reg, flow->base + DP_COM_CONF);
@@ -347,6 +351,8 @@ int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
        mutex_init(&priv->mutex);
 
        for (i = 0; i < IPUV3_NUM_FLOWS; i++) {
+               priv->flow[i].background.in_cs = IPUV3_COLORSPACE_UNKNOWN;
+               priv->flow[i].foreground.in_cs = IPUV3_COLORSPACE_UNKNOWN;
                priv->flow[i].foreground.foreground = true;
                priv->flow[i].base = priv->base + ipu_dp_flow_base[i];
                priv->flow[i].priv = priv;
index 6ca8d322b487279348d90513caa10d6bb6745e40..4ca0cdfa6b33af35f951a3969cf9c02b67d7a1b0 100644 (file)
@@ -150,6 +150,7 @@ config HID_ASUS
        tristate "Asus"
        depends on LEDS_CLASS
        depends on ASUS_WMI || ASUS_WMI=n
+       select POWER_SUPPLY
        ---help---
        Support for Asus notebook built-in keyboard and touchpad via i2c, and
        the Asus Republic of Gamers laptop keyboard special keys.
index 9993b692598fb84d1700e26ef7f97856ff842955..860e21ec6a492a35392f2b47146f5e0a811c1068 100644 (file)
@@ -1301,10 +1301,10 @@ static u32 __extract(u8 *report, unsigned offset, int n)
 u32 hid_field_extract(const struct hid_device *hid, u8 *report,
                        unsigned offset, unsigned n)
 {
-       if (n > 32) {
-               hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
+       if (n > 256) {
+               hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n",
                         n, current->comm);
-               n = 32;
+               n = 256;
        }
 
        return __extract(report, offset, n);
index ac9fda1b5a7233c227cd5517dcf6331a29483a60..1384e57182af978e4329c9e946e3487f85229be5 100644 (file)
@@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p)
        seq_printf(f, "\n\n");
 
        /* dump parsed data and input mappings */
+       if (down_interruptible(&hdev->driver_input_lock))
+               return 0;
+
        hid_dump_device(hdev, f);
        seq_printf(f, "\n");
        hid_dump_input_mapping(hdev, f);
 
+       up(&hdev->driver_input_lock);
+
        return 0;
 }
 
index b6d93f4ad037e440d1e5d23d76058e4be606159c..adce58f24f7638a70c170f17a694b5baa7f5a49a 100644 (file)
 #define USB_DEVICE_ID_SYNAPTICS_HD     0x0ac3
 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD        0x1ac3
 #define USB_DEVICE_ID_SYNAPTICS_TP_V103        0x5710
+#define I2C_DEVICE_ID_SYNAPTICS_7E7E   0x7e7e
 
 #define USB_VENDOR_ID_TEXAS_INSTRUMENTS        0x2047
 #define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA    0x0855
index b10b1922c5bdf304a4f32100365da00f0f1572f4..b607286a0bc82f360a133b5dce204a8f2441ff5c 100644 (file)
@@ -680,6 +680,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                        break;
                }
 
+               if ((usage->hid & 0xf0) == 0xb0) {      /* SC - Display */
+                       switch (usage->hid & 0xf) {
+                       case 0x05: map_key_clear(KEY_SWITCHVIDEOMODE); break;
+                       default: goto ignore;
+                       }
+                       break;
+               }
+
                /*
                 * Some lazy vendors declare 255 usages for System Control,
                 * leading to the creation of ABS_X|Y axis and too many others.
@@ -902,7 +910,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x06a: map_key_clear(KEY_GREEN);           break;
                case 0x06b: map_key_clear(KEY_BLUE);            break;
                case 0x06c: map_key_clear(KEY_YELLOW);          break;
-               case 0x06d: map_key_clear(KEY_ZOOM);            break;
+               case 0x06d: map_key_clear(KEY_ASPECT_RATIO);    break;
 
                case 0x06f: map_key_clear(KEY_BRIGHTNESSUP);            break;
                case 0x070: map_key_clear(KEY_BRIGHTNESSDOWN);          break;
@@ -911,6 +919,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX);          break;
                case 0x075: map_key_clear(KEY_BRIGHTNESS_AUTO);         break;
 
+               case 0x079: map_key_clear(KEY_KBDILLUMUP);      break;
+               case 0x07a: map_key_clear(KEY_KBDILLUMDOWN);    break;
+               case 0x07c: map_key_clear(KEY_KBDILLUMTOGGLE);  break;
+
                case 0x082: map_key_clear(KEY_VIDEO_NEXT);      break;
                case 0x083: map_key_clear(KEY_LAST);            break;
                case 0x084: map_key_clear(KEY_ENTER);           break;
@@ -998,6 +1010,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x1b8: map_key_clear(KEY_VIDEO);           break;
                case 0x1bc: map_key_clear(KEY_MESSENGER);       break;
                case 0x1bd: map_key_clear(KEY_INFO);            break;
+               case 0x1cb: map_key_clear(KEY_ASSISTANT);       break;
                case 0x201: map_key_clear(KEY_NEW);             break;
                case 0x202: map_key_clear(KEY_OPEN);            break;
                case 0x203: map_key_clear(KEY_CLOSE);           break;
@@ -1021,6 +1034,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x22d: map_key_clear(KEY_ZOOMIN);          break;
                case 0x22e: map_key_clear(KEY_ZOOMOUT);         break;
                case 0x22f: map_key_clear(KEY_ZOOMRESET);       break;
+               case 0x232: map_key_clear(KEY_FULL_SCREEN);     break;
                case 0x233: map_key_clear(KEY_SCROLLUP);        break;
                case 0x234: map_key_clear(KEY_SCROLLDOWN);      break;
                case 0x238: /* AC Pan */
@@ -1044,6 +1058,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT);   break;
                case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL);   break;
 
+               case 0x29f: map_key_clear(KEY_SCALE);           break;
+
                default: map_key_clear(KEY_UNKNOWN);
                }
                break;
index 15ed6177a7a364d6b2634babe0df1be83b4cec7b..199cc256e9d9d3903016f64f66b36b909a9c1109 100644 (file)
@@ -2111,6 +2111,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
                kfree(data);
                return -ENOMEM;
        }
+       data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
+       if (!data->wq) {
+               kfree(data->effect_ids);
+               kfree(data);
+               return -ENOMEM;
+       }
+
        data->hidpp = hidpp;
        data->feature_index = feature_index;
        data->version = version;
@@ -2155,7 +2162,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
        /* ignore boost value at response.fap.params[2] */
 
        /* init the hardware command queue */
-       data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
        atomic_set(&data->workqueue_size, 0);
 
        /* initialize with zero autocenter to get wheel in usable state */
@@ -2608,8 +2614,9 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
                input_report_rel(mydata->input, REL_Y, v);
 
                v = hid_snto32(data[6], 8);
-               hidpp_scroll_counter_handle_scroll(
-                               &hidpp->vertical_wheel_counter, v);
+               if (v != 0)
+                       hidpp_scroll_counter_handle_scroll(
+                                       &hidpp->vertical_wheel_counter, v);
 
                input_sync(mydata->input);
        }
index 953908f2267c0653478cf88d53c7e85fdb121d76..77ffba48cc737e0df69892cfcceaafacb9815534 100644 (file)
@@ -715,7 +715,6 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
-       { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
@@ -855,7 +854,7 @@ static const struct hid_device_id hid_ignore_list[] = {
        { }
 };
 
-/**
+/*
  * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
  *
  * There are composite devices for which we want to ignore only a certain
@@ -996,6 +995,10 @@ bool hid_ignore(struct hid_device *hdev)
                if (hdev->product == 0x0401 &&
                    strncmp(hdev->name, "ELAN0800", 8) != 0)
                        return true;
+               /* Same with product id 0x0400 */
+               if (hdev->product == 0x0400 &&
+                   strncmp(hdev->name, "QTEC0001", 8) != 0)
+                       return true;
                break;
        }
 
@@ -1042,7 +1045,7 @@ static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
        }
 
        if (bl_entry != NULL)
-               dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%hx:0x%hx\n",
+               dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%04x:0x%04x\n",
                        bl_entry->driver_data, bl_entry->vendor,
                        bl_entry->product);
 
@@ -1209,7 +1212,7 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev)
                quirks |= bl_entry->driver_data;
 
        if (quirks)
-               dbg_hid("Found squirk 0x%lx for HID device 0x%hx:0x%hx\n",
+               dbg_hid("Found squirk 0x%lx for HID device 0x%04x:0x%04x\n",
                        quirks, hdev->vendor, hdev->product);
        return quirks;
 }
index 8141cadfca0e3c3ce62eccff1c28cd94031827b1..8dae0f9b819e011d6695462fea7e88e85cd16669 100644 (file)
@@ -499,6 +499,7 @@ static void steam_battery_unregister(struct steam_device *steam)
 static int steam_register(struct steam_device *steam)
 {
        int ret;
+       bool client_opened;
 
        /*
         * This function can be called several times in a row with the
@@ -511,9 +512,11 @@ static int steam_register(struct steam_device *steam)
                 * Unlikely, but getting the serial could fail, and it is not so
                 * important, so make up a serial number and go on.
                 */
+               mutex_lock(&steam->mutex);
                if (steam_get_serial(steam) < 0)
                        strlcpy(steam->serial_no, "XXXXXXXXXX",
                                        sizeof(steam->serial_no));
+               mutex_unlock(&steam->mutex);
 
                hid_info(steam->hdev, "Steam Controller '%s' connected",
                                steam->serial_no);
@@ -528,13 +531,15 @@ static int steam_register(struct steam_device *steam)
        }
 
        mutex_lock(&steam->mutex);
-       if (!steam->client_opened) {
+       client_opened = steam->client_opened;
+       if (!client_opened)
                steam_set_lizard_mode(steam, lizard_mode);
+       mutex_unlock(&steam->mutex);
+
+       if (!client_opened)
                ret = steam_input_register(steam);
-       } else {
+       else
                ret = 0;
-       }
-       mutex_unlock(&steam->mutex);
 
        return ret;
 }
@@ -630,14 +635,21 @@ static void steam_client_ll_close(struct hid_device *hdev)
 {
        struct steam_device *steam = hdev->driver_data;
 
+       unsigned long flags;
+       bool connected;
+
+       spin_lock_irqsave(&steam->lock, flags);
+       connected = steam->connected;
+       spin_unlock_irqrestore(&steam->lock, flags);
+
        mutex_lock(&steam->mutex);
        steam->client_opened = false;
+       if (connected)
+               steam_set_lizard_mode(steam, lizard_mode);
        mutex_unlock(&steam->mutex);
 
-       if (steam->connected) {
-               steam_set_lizard_mode(steam, lizard_mode);
+       if (connected)
                steam_input_register(steam);
-       }
 }
 
 static int steam_client_ll_raw_request(struct hid_device *hdev,
index 7710d9f957da5b0dd07ca1444416de7cecd10529..0187c9f8fc22c5567e934cc0cc2089877963c56e 100644 (file)
@@ -735,10 +735,6 @@ static int uclogic_params_huion_init(struct uclogic_params *params,
                goto cleanup;
        }
        rc = usb_string(udev, 201, ver_ptr, ver_len);
-       if (ver_ptr == NULL) {
-               rc = -ENOMEM;
-               goto cleanup;
-       }
        if (rc == -EPIPE) {
                *ver_ptr = '\0';
        } else if (rc < 0) {
index 90164fed08d35eca2c34250c8b7cb3814ea99f53..4d1f24ee249c4455a4d5dfe18c7e6b0541311ad4 100644 (file)
@@ -184,6 +184,8 @@ static const struct i2c_hid_quirks {
                I2C_HID_QUIRK_NO_RUNTIME_PM },
        { USB_VENDOR_ID_ELAN, HID_ANY_ID,
                 I2C_HID_QUIRK_BOGUS_IRQ },
+       { USB_VENDOR_ID_SYNAPTICS, I2C_DEVICE_ID_SYNAPTICS_7E7E,
+               I2C_HID_QUIRK_NO_RUNTIME_PM },
        { 0, 0 }
 };
 
index 6f929bfa9fcd39380f7e9d9fc9729156e28e09f6..d0f1dfe2bcbbd652aa1daa682d2feac611dfa4da 100644 (file)
@@ -1759,6 +1759,7 @@ config SENSORS_VT8231
 config SENSORS_W83773G
        tristate "Nuvoton W83773G"
        depends on I2C
+       select REGMAP_I2C
        help
          If you say yes here you get support for the Nuvoton W83773G hardware
          monitoring chip.
index e4f9f7ce92fabc7c5f10aebaf5d69d350da565d2..f9abeeeead9e966dd8c8df7d225b81788eb67e40 100644 (file)
@@ -640,7 +640,7 @@ static const struct hwmon_channel_info ntc_chip = {
 };
 
 static const u32 ntc_temp_config[] = {
-       HWMON_T_INPUT, HWMON_T_TYPE,
+       HWMON_T_INPUT | HWMON_T_TYPE,
        0
 };
 
index b91a80abf724d087e02cc7aa97e211a1380f6570..4679acb4918e7f65660a8e0e32a7c5f793f03ff0 100644 (file)
@@ -890,6 +890,8 @@ static int occ_setup_sensor_attrs(struct occ *occ)
                                s++;
                        }
                }
+
+               s = (sensors->power.num_sensors * 4) + 1;
        } else {
                for (i = 0; i < sensors->power.num_sensors; ++i) {
                        s = i + 1;
@@ -918,11 +920,11 @@ static int occ_setup_sensor_attrs(struct occ *occ)
                                                     show_power, NULL, 3, i);
                        attr++;
                }
-       }
 
-       if (sensors->caps.num_sensors >= 1) {
                s = sensors->power.num_sensors + 1;
+       }
 
+       if (sensors->caps.num_sensors >= 1) {
                snprintf(attr->name, sizeof(attr->name), "power%d_label", s);
                attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
                                             0, 0);
index f2c6819712013046246002346af928bd1ab16bc0..f8979abb9a19ca963bf9625fc911ab74590b388a 100644 (file)
@@ -131,6 +131,7 @@ config I2C_I801
            Cannon Lake (PCH)
            Cedar Fork (PCH)
            Ice Lake (PCH)
+           Comet Lake (PCH)
 
          This driver can also be built as a module.  If so, the module
          will be called i2c-i801.
index bb8e3f149979649c78993296d8337892bfd4e7f1..d464799e40a302677908d15eec3b3ff39f625a06 100644 (file)
@@ -426,8 +426,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
 
        pm_runtime_get_sync(dev->dev);
 
-       if (dev->suspended) {
-               dev_err(dev->dev, "Error %s call while suspended\n", __func__);
+       if (dev_WARN_ONCE(dev->dev, dev->suspended, "Transfer while suspended\n")) {
                ret = -ESHUTDOWN;
                goto done_nolock;
        }
index c91e145ef5a56dbb1a512c23611f06ad7d22aa05..679c6c41f64b49babf8b0a7505d56a2c4093f6c7 100644 (file)
@@ -71,6 +71,7 @@
  * Cannon Lake-LP (PCH)                0x9da3  32      hard    yes     yes     yes
  * Cedar Fork (PCH)            0x18df  32      hard    yes     yes     yes
  * Ice Lake-LP (PCH)           0x34a3  32      hard    yes     yes     yes
+ * Comet Lake (PCH)            0x02a3  32      hard    yes     yes     yes
  *
  * Features supported by this driver:
  * Software PEC                                no
 #define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS       0xa223
 #define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS       0xa2a3
 #define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS         0xa323
+#define PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS            0x02a3
 
 struct i801_mux_config {
        char *gpio_chip;
@@ -1038,6 +1040,7 @@ static const struct pci_device_id i801_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) },
        { 0, }
 };
 
@@ -1534,6 +1537,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
        case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
        case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
        case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS:
+       case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS:
                priv->features |= FEATURE_I2C_BLOCK_READ;
                priv->features |= FEATURE_IRQ;
                priv->features |= FEATURE_SMBUS_PEC;
index 42fed40198a0fb77981e90236c465c9a12bff218..fd70b110e8f4e30c36b389e9a1745e00db16069c 100644 (file)
@@ -515,9 +515,9 @@ static int i2c_imx_clk_notifier_call(struct notifier_block *nb,
                                     unsigned long action, void *data)
 {
        struct clk_notifier_data *ndata = data;
-       struct imx_i2c_struct *i2c_imx = container_of(&ndata->clk,
+       struct imx_i2c_struct *i2c_imx = container_of(nb,
                                                      struct imx_i2c_struct,
-                                                     clk);
+                                                     clk_change_nb);
 
        if (action & POST_RATE_CHANGE)
                i2c_imx_set_clk(i2c_imx, ndata->new_rate);
@@ -1169,11 +1169,13 @@ static int i2c_imx_probe(struct platform_device *pdev)
        /* Init DMA config if supported */
        ret = i2c_imx_dma_request(i2c_imx, phy_addr);
        if (ret < 0)
-               goto clk_notifier_unregister;
+               goto del_adapter;
 
        dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
        return 0;   /* Return OK */
 
+del_adapter:
+       i2c_del_adapter(&i2c_imx->adapter);
 clk_notifier_unregister:
        clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
 rpm_disable:
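
[Editor's note: the container_of() fix above matters because the notifier core hands back a pointer to the embedded notifier_block itself; the old code passed &ndata->clk, a member of a different structure entirely, which computed a bogus base pointer. A reduced sketch of the correct usage, with hypothetical example_* names:]

    struct example_dev {
            struct clk *clk;
            struct notifier_block clk_change_nb;    /* embedded member */
    };

    static int example_clk_notifier_call(struct notifier_block *nb,
                                         unsigned long action, void *data)
    {
            /* 'nb' is the address of clk_change_nb inside example_dev,
             * so container_of() must name exactly that member.
             */
            struct example_dev *edev =
                    container_of(nb, struct example_dev, clk_change_nb);

            (void)edev;
            return NOTIFY_OK;
    }
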
index d18b0941b71a4d37d5b896645c68a1ec4b8e49a4..f14d4b3fab446fe698f41e2e37c6274320611ccc 100644 (file)
@@ -597,6 +597,8 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
        i2c->adapter = synquacer_i2c_ops;
        i2c_set_adapdata(&i2c->adapter, i2c);
        i2c->adapter.dev.parent = &pdev->dev;
+       i2c->adapter.dev.of_node = pdev->dev.of_node;
+       ACPI_COMPANION_SET(&i2c->adapter.dev, ACPI_COMPANION(&pdev->dev));
        i2c->adapter.nr = pdev->id;
        init_completion(&i2c->completion);
 
index 38af18645133cb486d6494bb642128414f2194eb..688aa3b5f3ac0cc338848015fcf0fc8bd862e8d1 100644 (file)
@@ -185,7 +185,7 @@ static int i2c_generic_bus_free(struct i2c_adapter *adap)
 int i2c_generic_scl_recovery(struct i2c_adapter *adap)
 {
        struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
-       int i = 0, scl = 1, ret;
+       int i = 0, scl = 1, ret = 0;
 
        if (bri->prepare_recovery)
                bri->prepare_recovery(adap);
@@ -327,6 +327,8 @@ static int i2c_device_probe(struct device *dev)
 
                if (client->flags & I2C_CLIENT_HOST_NOTIFY) {
                        dev_dbg(dev, "Using Host Notify IRQ\n");
+                       /* Keep adapter active when Host Notify is required */
+                       pm_runtime_get_sync(&client->adapter->dev);
                        irq = i2c_smbus_host_notify_to_irq(client);
                } else if (dev->of_node) {
                        irq = of_irq_get_byname(dev->of_node, "irq");
@@ -431,6 +433,8 @@ static int i2c_device_remove(struct device *dev)
        device_init_wakeup(&client->dev, false);
 
        client->irq = client->init_irq;
+       if (client->flags & I2C_CLIENT_HOST_NOTIFY)
+               pm_runtime_put(&client->adapter->dev);
 
        return status;
 }
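
[Editor's note: Host Notify delivery requires the adapter to stay powered, so the probe path pins it with pm_runtime_get_sync() and this remove-path change drops the reference, keeping the pair balanced across the client's lifetime. A sketch of the pattern, with a hypothetical predicate:]

    /* Keep a parent device active for as long as a bound child needs an
     * always-on feature; the get in probe pairs with the put in remove.
     */
    static int example_probe(struct device *child, struct device *parent)
    {
            if (example_needs_feature(child))       /* hypothetical */
                    pm_runtime_get_sync(parent);
            return 0;
    }

    static int example_remove(struct device *child, struct device *parent)
    {
            if (example_needs_feature(child))
                    pm_runtime_put(parent);
            return 0;
    }
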
index 2dc628d4f1aee1b5c07593f85e7d75a6bdb3d0be..1412abcff01095cd001ece69bb80a69d714a80ba 100644 (file)
@@ -1980,7 +1980,6 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
 {
        struct i3c_dev_boardinfo *boardinfo;
        struct device *dev = &master->dev;
-       struct i3c_device_info info = { };
        enum i3c_addr_slot_status addrstatus;
        u32 init_dyn_addr = 0;
 
@@ -2012,8 +2011,8 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
 
        boardinfo->pid = ((u64)reg[1] << 32) | reg[2];
 
-       if ((info.pid & GENMASK_ULL(63, 48)) ||
-           I3C_PID_RND_LOWER_32BITS(info.pid))
+       if ((boardinfo->pid & GENMASK_ULL(63, 48)) ||
+           I3C_PID_RND_LOWER_32BITS(boardinfo->pid))
                return -EINVAL;
 
        boardinfo->init_dyn_addr = init_dyn_addr;
index 59279224e07fcefa460b1939212a5009eb8a3681..10c26ffaa8effe969464c6abc1ef6f198c069744 100644 (file)
@@ -300,7 +300,7 @@ to_dw_i3c_master(struct i3c_master_controller *master)
 
 static void dw_i3c_master_disable(struct dw_i3c_master *master)
 {
-       writel(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_ENABLE,
+       writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE,
               master->regs + DEVICE_CTRL);
 }
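
[Editor's note: the one-character fix replaces a mask that kept only DEV_CTRL_ENABLE with its complement, which actually clears the bit. The two canonical read-modify-write forms, sketched in the same MMIO style:]

    static void example_enable(void __iomem *regs)
    {
            /* set the bit: OR it in */
            writel(readl(regs + DEVICE_CTRL) | DEV_CTRL_ENABLE,
                   regs + DEVICE_CTRL);
    }

    static void example_disable(void __iomem *regs)
    {
            /* clear the bit: AND with the complement */
            writel(readl(regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE,
                   regs + DEVICE_CTRL);
    }
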
 
index 7096e577b23f86f5f71aa36beae3e5526ab856d7..50f3ff386bea43f8853c5f5e7426c88c0e3f0851 100644 (file)
@@ -1437,6 +1437,8 @@ static int kxcjk1013_resume(struct device *dev)
 
        mutex_lock(&data->mutex);
        ret = kxcjk1013_set_mode(data, OPERATION);
+       if (ret == 0)
+               ret = kxcjk1013_set_range(data, data->range);
        mutex_unlock(&data->mutex);
 
        return ret;
index ff5f2da2e1b134d369fbc6ce7180a9ebf0de4ec1..54d9978b274055da963ed282b6b94c0b4245fed5 100644 (file)
@@ -121,6 +121,7 @@ static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta,
        if (sigma_delta->info->has_registers) {
                data[0] = reg << sigma_delta->info->addr_shift;
                data[0] |= sigma_delta->info->read_mask;
+               data[0] |= sigma_delta->comm;
                spi_message_add_tail(&t[0], &m);
        }
        spi_message_add_tail(&t[1], &m);
index 75d2f73582a3d7581e533afd361cdb7af7df46d0..596841a3c4db77f59f5fc7c3c3f0f1fddc21aab7 100644 (file)
@@ -704,23 +704,29 @@ static int at91_adc_read_raw(struct iio_dev *idev,
                ret = wait_event_interruptible_timeout(st->wq_data_avail,
                                                       st->done,
                                                       msecs_to_jiffies(1000));
-               if (ret == 0)
-                       ret = -ETIMEDOUT;
-               if (ret < 0) {
-                       mutex_unlock(&st->lock);
-                       return ret;
-               }
-
-               *val = st->last_value;
 
+               /* Disable interrupts, regardless of whether the ADC
+                * conversion was successful or not
+                */
                at91_adc_writel(st, AT91_ADC_CHDR,
                                AT91_ADC_CH(chan->channel));
                at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel));
 
-               st->last_value = 0;
-               st->done = false;
+               if (ret > 0) {
+                       /* a valid conversion took place */
+                       *val = st->last_value;
+                       st->last_value = 0;
+                       st->done = false;
+                       ret = IIO_VAL_INT;
+               } else if (ret == 0) {
+                       /* conversion timeout */
+                       dev_err(&idev->dev, "ADC Channel %d timeout.\n",
+                               chan->channel);
+                       ret = -ETIMEDOUT;
+               }
+
                mutex_unlock(&st->lock);
-               return IIO_VAL_INT;
+               return ret;
 
        case IIO_CHAN_INFO_SCALE:
                *val = st->vref_mv;
index b13c61539d46baf3490be318342dac64b6b1dc4b..6401ca7a9a2072e9760144c7b71717d985db6d27 100644 (file)
@@ -1292,6 +1292,7 @@ static int xadc_probe(struct platform_device *pdev)
 
 err_free_irq:
        free_irq(xadc->irq, indio_dev);
+       cancel_delayed_work_sync(&xadc->zynq_unmask_work);
 err_clk_disable_unprepare:
        clk_disable_unprepare(xadc->clk);
 err_free_samplerate_trigger:
@@ -1321,8 +1322,8 @@ static int xadc_remove(struct platform_device *pdev)
                iio_triggered_buffer_cleanup(indio_dev);
        }
        free_irq(xadc->irq, indio_dev);
+       cancel_delayed_work_sync(&xadc->zynq_unmask_work);
        clk_disable_unprepare(xadc->clk);
-       cancel_delayed_work(&xadc->zynq_unmask_work);
        kfree(xadc->data);
        kfree(indio_dev->channels);
 
index d5d146e9e372305852e7dc20c5492fc48e5218b1..92c684d2b67ecfd7992327e8703812c5f7cdd94f 100644 (file)
@@ -64,6 +64,7 @@ config IAQCORE
 config PMS7003
        tristate "Plantower PMS7003 particulate matter sensor"
        depends on SERIAL_DEV_BUS
+       select IIO_TRIGGERED_BUFFER
        help
          Say Y here to build support for the Plantower PMS7003 particulate
          matter sensor.
@@ -71,6 +72,19 @@ config PMS7003
          To compile this driver as a module, choose M here: the module will
          be called pms7003.
 
+config SENSIRION_SGP30
+       tristate "Sensirion SGPxx gas sensors"
+       depends on I2C
+       select CRC8
+       help
+         Say Y here to build I2C interface support for the following
+         Sensirion SGP gas sensors:
+           * SGP30 gas sensor
+           * SGPC3 low power gas sensor
+
+         To compile this driver as module, choose M here: the
+         module will be called sgp30.
+
 config SPS30
        tristate "SPS30 particulate matter sensor"
        depends on I2C
index 0ae89b87e2d6451fbe177e0978b2179ab4c4487b..4edc5d21cb9fa63739d70fee23976832f1cf9313 100644 (file)
@@ -2,11 +2,9 @@
 #ifndef BME680_H_
 #define BME680_H_
 
-#define BME680_REG_CHIP_I2C_ID                 0xD0
-#define BME680_REG_CHIP_SPI_ID                 0x50
+#define BME680_REG_CHIP_ID                     0xD0
 #define   BME680_CHIP_ID_VAL                   0x61
-#define BME680_REG_SOFT_RESET_I2C              0xE0
-#define BME680_REG_SOFT_RESET_SPI              0x60
+#define BME680_REG_SOFT_RESET                  0xE0
 #define   BME680_CMD_SOFTRESET                 0xB6
 #define BME680_REG_STATUS                      0x73
 #define   BME680_SPI_MEM_PAGE_BIT              BIT(4)
index 70c1fe4366f4c6a17100b469452f8903281e665e..ccde4c65ff9340b2bcc4a730dc978daf918fa08c 100644 (file)
@@ -63,9 +63,23 @@ struct bme680_data {
        s32 t_fine;
 };
 
+static const struct regmap_range bme680_volatile_ranges[] = {
+       regmap_reg_range(BME680_REG_MEAS_STAT_0, BME680_REG_GAS_R_LSB),
+       regmap_reg_range(BME680_REG_STATUS, BME680_REG_STATUS),
+       regmap_reg_range(BME680_T2_LSB_REG, BME680_GH3_REG),
+};
+
+static const struct regmap_access_table bme680_volatile_table = {
+       .yes_ranges     = bme680_volatile_ranges,
+       .n_yes_ranges   = ARRAY_SIZE(bme680_volatile_ranges),
+};
+
 const struct regmap_config bme680_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
+       .max_register = 0xef,
+       .volatile_table = &bme680_volatile_table,
+       .cache_type = REGCACHE_RBTREE,
 };
 EXPORT_SYMBOL(bme680_regmap_config);
 
@@ -316,6 +330,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data,
        s64 var1, var2, var3;
        s16 calc_temp;
 
+       /* If the calibration is invalid, attempt to reload it */
+       if (!calib->par_t2)
+               bme680_read_calib(data, calib);
+
        var1 = (adc_temp >> 3) - (calib->par_t1 << 1);
        var2 = (var1 * calib->par_t2) >> 11;
        var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
@@ -583,8 +601,7 @@ static int bme680_gas_config(struct bme680_data *data)
        return ret;
 }
 
-static int bme680_read_temp(struct bme680_data *data,
-                           int *val, int *val2)
+static int bme680_read_temp(struct bme680_data *data, int *val)
 {
        struct device *dev = regmap_get_device(data->regmap);
        int ret;
@@ -617,10 +634,9 @@ static int bme680_read_temp(struct bme680_data *data,
         * compensate_press/compensate_humid to get compensated
         * pressure/humidity readings.
         */
-       if (val && val2) {
-               *val = comp_temp;
-               *val2 = 100;
-               return IIO_VAL_FRACTIONAL;
+       if (val) {
+               *val = comp_temp * 10; /* Centidegrees to millidegrees */
+               return IIO_VAL_INT;
        }
 
        return ret;
@@ -635,7 +651,7 @@ static int bme680_read_press(struct bme680_data *data,
        s32 adc_press;
 
        /* Read and compensate temperature to get a reading of t_fine */
-       ret = bme680_read_temp(data, NULL, NULL);
+       ret = bme680_read_temp(data, NULL);
        if (ret < 0)
                return ret;
 
@@ -668,7 +684,7 @@ static int bme680_read_humid(struct bme680_data *data,
        u32 comp_humidity;
 
        /* Read and compensate temperature to get a reading of t_fine */
-       ret = bme680_read_temp(data, NULL, NULL);
+       ret = bme680_read_temp(data, NULL);
        if (ret < 0)
                return ret;
 
@@ -761,7 +777,7 @@ static int bme680_read_raw(struct iio_dev *indio_dev,
        case IIO_CHAN_INFO_PROCESSED:
                switch (chan->type) {
                case IIO_TEMP:
-                       return bme680_read_temp(data, val, val2);
+                       return bme680_read_temp(data, val);
                case IIO_PRESSURE:
                        return bme680_read_press(data, val, val2);
                case IIO_HUMIDITYRELATIVE:
@@ -867,8 +883,28 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap,
 {
        struct iio_dev *indio_dev;
        struct bme680_data *data;
+       unsigned int val;
        int ret;
 
+       ret = regmap_write(regmap, BME680_REG_SOFT_RESET,
+                          BME680_CMD_SOFTRESET);
+       if (ret < 0) {
+               dev_err(dev, "Failed to reset chip\n");
+               return ret;
+       }
+
+       ret = regmap_read(regmap, BME680_REG_CHIP_ID, &val);
+       if (ret < 0) {
+               dev_err(dev, "Error reading chip ID\n");
+               return ret;
+       }
+
+       if (val != BME680_CHIP_ID_VAL) {
+               dev_err(dev, "Wrong chip ID, got %x expected %x\n",
+                               val, BME680_CHIP_ID_VAL);
+               return -ENODEV;
+       }
+
        indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
        if (!indio_dev)
                return -ENOMEM;
index b2f805b6b36a4904fea4183aa66cce10304b6a67..de9c9e3d23ea347824f0a480031bf46e11021c6a 100644 (file)
@@ -23,8 +23,6 @@ static int bme680_i2c_probe(struct i2c_client *client,
 {
        struct regmap *regmap;
        const char *name = NULL;
-       unsigned int val;
-       int ret;
 
        regmap = devm_regmap_init_i2c(client, &bme680_regmap_config);
        if (IS_ERR(regmap)) {
@@ -33,25 +31,6 @@ static int bme680_i2c_probe(struct i2c_client *client,
                return PTR_ERR(regmap);
        }
 
-       ret = regmap_write(regmap, BME680_REG_SOFT_RESET_I2C,
-                          BME680_CMD_SOFTRESET);
-       if (ret < 0) {
-               dev_err(&client->dev, "Failed to reset chip\n");
-               return ret;
-       }
-
-       ret = regmap_read(regmap, BME680_REG_CHIP_I2C_ID, &val);
-       if (ret < 0) {
-               dev_err(&client->dev, "Error reading I2C chip ID\n");
-               return ret;
-       }
-
-       if (val != BME680_CHIP_ID_VAL) {
-               dev_err(&client->dev, "Wrong chip ID, got %x expected %x\n",
-                               val, BME680_CHIP_ID_VAL);
-               return -ENODEV;
-       }
-
        if (id)
                name = id->name;
 
index d0b7bdd3f066021938f436b3398a5c5532884a35..3b838068a7e48d7378597aeec7eef7da5953cd58 100644 (file)
 
 #include "bme680.h"
 
+struct bme680_spi_bus_context {
+       struct spi_device *spi;
+       u8 current_page;
+};
+
+/*
+ * In SPI mode there are only 7 address bits; a "page" register determines
+ * which part of the 8-bit range is active. This function looks at the
+ * address and writes the page selection bit if needed.
+ */
+static int bme680_regmap_spi_select_page(
+       struct bme680_spi_bus_context *ctx, u8 reg)
+{
+       struct spi_device *spi = ctx->spi;
+       int ret;
+       u8 buf[2];
+       u8 page = (reg & 0x80) ? 0 : 1; /* Page "1" is low range */
+
+       if (page == ctx->current_page)
+               return 0;
+
+       /*
+        * Data sheet claims we're only allowed to change bit 4, so we must do
+        * a read-modify-write on each and every page select
+        */
+       buf[0] = BME680_REG_STATUS;
+       ret = spi_write_then_read(spi, buf, 1, buf + 1, 1);
+       if (ret < 0) {
+               dev_err(&spi->dev, "failed to set page %u\n", page);
+               return ret;
+       }
+
+       buf[0] = BME680_REG_STATUS;
+       if (page)
+               buf[1] |= BME680_SPI_MEM_PAGE_BIT;
+       else
+               buf[1] &= ~BME680_SPI_MEM_PAGE_BIT;
+
+       ret = spi_write(spi, buf, 2);
+       if (ret < 0) {
+               dev_err(&spi->dev, "failed to set page %u\n", page);
+               return ret;
+       }
+
+       ctx->current_page = page;
+
+       return 0;
+}
+
 static int bme680_regmap_spi_write(void *context, const void *data,
                                   size_t count)
 {
-       struct spi_device *spi = context;
+       struct bme680_spi_bus_context *ctx = context;
+       struct spi_device *spi = ctx->spi;
+       int ret;
        u8 buf[2];
 
        memcpy(buf, data, 2);
+
+       ret = bme680_regmap_spi_select_page(ctx, buf[0]);
+       if (ret)
+               return ret;
+
        /*
         * The SPI register address (= full register address without bit 7)
         * and the write command (bit7 = RW = '0')
         */
        buf[0] &= ~0x80;
 
-       return spi_write_then_read(spi, buf, 2, NULL, 0);
+       return spi_write(spi, buf, 2);
 }
 
 static int bme680_regmap_spi_read(void *context, const void *reg,
                                  size_t reg_size, void *val, size_t val_size)
 {
-       struct spi_device *spi = context;
+       struct bme680_spi_bus_context *ctx = context;
+       struct spi_device *spi = ctx->spi;
+       int ret;
+       u8 addr = *(const u8 *)reg;
+
+       ret = bme680_regmap_spi_select_page(ctx, addr);
+       if (ret)
+               return ret;
 
-       return spi_write_then_read(spi, reg, reg_size, val, val_size);
+       addr |= 0x80; /* bit7 = RW = '1' */
+
+       return spi_write_then_read(spi, &addr, 1, val, val_size);
 }
 
 static struct regmap_bus bme680_regmap_bus = {
@@ -46,8 +111,8 @@ static struct regmap_bus bme680_regmap_bus = {
 static int bme680_spi_probe(struct spi_device *spi)
 {
        const struct spi_device_id *id = spi_get_device_id(spi);
+       struct bme680_spi_bus_context *bus_context;
        struct regmap *regmap;
-       unsigned int val;
        int ret;
 
        spi->bits_per_word = 8;
@@ -57,45 +122,21 @@ static int bme680_spi_probe(struct spi_device *spi)
                return ret;
        }
 
+       bus_context = devm_kzalloc(&spi->dev, sizeof(*bus_context), GFP_KERNEL);
+       if (!bus_context)
+               return -ENOMEM;
+
+       bus_context->spi = spi;
+       bus_context->current_page = 0xff; /* Undefined on warm boot */
+
        regmap = devm_regmap_init(&spi->dev, &bme680_regmap_bus,
-                                 &spi->dev, &bme680_regmap_config);
+                                 bus_context, &bme680_regmap_config);
        if (IS_ERR(regmap)) {
                dev_err(&spi->dev, "Failed to register spi regmap %d\n",
                                (int)PTR_ERR(regmap));
                return PTR_ERR(regmap);
        }
 
-       ret = regmap_write(regmap, BME680_REG_SOFT_RESET_SPI,
-                          BME680_CMD_SOFTRESET);
-       if (ret < 0) {
-               dev_err(&spi->dev, "Failed to reset chip\n");
-               return ret;
-       }
-
-       /* after power-on reset, Page 0(0x80-0xFF) of spi_mem_page is active */
-       ret = regmap_read(regmap, BME680_REG_CHIP_SPI_ID, &val);
-       if (ret < 0) {
-               dev_err(&spi->dev, "Error reading SPI chip ID\n");
-               return ret;
-       }
-
-       if (val != BME680_CHIP_ID_VAL) {
-               dev_err(&spi->dev, "Wrong chip ID, got %x expected %x\n",
-                               val, BME680_CHIP_ID_VAL);
-               return -ENODEV;
-       }
-       /*
-        * select Page 1 of spi_mem_page to enable access to
-        * to registers from address 0x00 to 0x7F.
-        */
-       ret = regmap_write_bits(regmap, BME680_REG_STATUS,
-                               BME680_SPI_MEM_PAGE_BIT,
-                               BME680_SPI_MEM_PAGE_1_VAL);
-       if (ret < 0) {
-               dev_err(&spi->dev, "failed to set page 1 of spi_mem_page\n");
-               return ret;
-       }
-
        return bme680_core_probe(&spi->dev, regmap, id->name);
 }
 
index 89cb0066a6e0839f49fd68fb2395e8425b174c90..8d76afb87d87c58322b3ee8835ea31f5edc5a834 100644 (file)
@@ -103,9 +103,10 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
                         * Do not use IIO_DEGREE_TO_RAD to avoid precision
                         * loss. Round to the nearest integer.
                         */
-                       *val = div_s64(val64 * 314159 + 9000000ULL, 1000);
-                       *val2 = 18000 << (CROS_EC_SENSOR_BITS - 1);
-                       ret = IIO_VAL_FRACTIONAL;
+                       *val = 0;
+                       *val2 = div_s64(val64 * 3141592653ULL,
+                                       180 << (CROS_EC_SENSOR_BITS - 1));
+                       ret = IIO_VAL_INT_PLUS_NANO;
                        break;
                case MOTIONSENSE_TYPE_MAG:
                        /*
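
The replacement above carries pi as 3141592653 (pi times 1e9) so the degrees-per-second to radians-per-second scale keeps nano precision through integer division. A standalone sketch of the computation, with CROS_EC_SENSOR_BITS assumed to be 16 and the raw range value purely hypothetical:

#include <stdio.h>
#include <stdint.h>

#define CROS_EC_SENSOR_BITS 16  /* assumption: matches the driver's definition */

int main(void)
{
        int64_t val64 = 1000;   /* hypothetical raw gyro range from the EC */

        /* scale = range * pi / (180 * 2^(bits-1)), in nano rad/s per LSB;
         * the kernel uses div_s64() for the 64-bit division. */
        int64_t nano = val64 * 3141592653LL /
                       (180LL << (CROS_EC_SENSOR_BITS - 1));

        printf("scale = 0.%09lld rad/s per LSB\n", (long long)nano);
        return 0;
}
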
index 6d71fd905e29d69b0a5b1afa99c5451037333153..c701a45469f6436746b8c7e7b2da0680c45829ec 100644 (file)
@@ -92,6 +92,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev,
 
        inoutbuf[0] = 0x60; /* write EEPROM */
        inoutbuf[0] |= data->ref_mode << 3;
+       inoutbuf[0] |= data->powerdown ? ((data->powerdown_mode + 1) << 1) : 0;
        inoutbuf[1] = data->dac_value >> 4;
        inoutbuf[2] = (data->dac_value & 0xf) << 4;
 
index 63ca31628a93af8454f5fce9ced15d752f28f16d..92c07ab826eb32c9d4665728159dbd4358cefc80 100644 (file)
@@ -582,11 +582,10 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
        case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
                return bmg160_get_filter(data, val);
        case IIO_CHAN_INFO_SCALE:
-               *val = 0;
                switch (chan->type) {
                case IIO_TEMP:
-                       *val2 = 500000;
-                       return IIO_VAL_INT_PLUS_MICRO;
+                       *val = 500;
+                       return IIO_VAL_INT;
                case IIO_ANGL_VEL:
                {
                        int i;
@@ -594,6 +593,7 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
                        for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) {
                                if (bmg160_scale_table[i].dps_range ==
                                                        data->dps_range) {
+                                       *val = 0;
                                        *val2 = bmg160_scale_table[i].scale;
                                        return IIO_VAL_INT_PLUS_MICRO;
                                }
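
This change matters because IIO userspace decodes the two return types differently: IIO_VAL_INT is val taken as-is, while IIO_VAL_INT_PLUS_MICRO is val plus val2 millionths. A sketch of that decoding (the 1065 angular-velocity value is illustrative):

#include <stdio.h>

/* How (val, val2) pairs are interpreted for the two return types used in
 * the hunk above. */
static double iio_val_int(int val)
{
        return val;
}

static double iio_val_int_plus_micro(int val, int val2)
{
        return val + val2 / 1e6;
}

int main(void)
{
        /* Old temperature scale: 0 + 500000e-6 = 0.5; new scale: 500. */
        printf("old temp scale: %g\n", iio_val_int_plus_micro(0, 500000));
        printf("new temp scale: %g\n", iio_val_int(500));
        /* Angular-velocity scales stay fractional, e.g. 0.001065. */
        printf("angl_vel scale: %g\n", iio_val_int_plus_micro(0, 1065));
        return 0;
}
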
index 77fac81a3adce2245fe0bf499b60a382c08af98b..5ddebede31a6f6f3625263893ad27a0f3fb68b88 100644 (file)
@@ -29,7 +29,8 @@
 
 #include "mpu3050.h"
 
-#define MPU3050_CHIP_ID                0x69
+#define MPU3050_CHIP_ID                0x68
+#define MPU3050_CHIP_ID_MASK   0x7E
 
 /*
  * Register map: anything suffixed *_H is a big-endian high byte and always
@@ -1176,8 +1177,9 @@ int mpu3050_common_probe(struct device *dev,
                goto err_power_down;
        }
 
-       if (val != MPU3050_CHIP_ID) {
-               dev_err(dev, "unsupported chip id %02x\n", (u8)val);
+       if ((val & MPU3050_CHIP_ID_MASK) != MPU3050_CHIP_ID) {
+               dev_err(dev, "unsupported chip id %02x\n",
+                               (u8)(val & MPU3050_CHIP_ID_MASK));
                ret = -ENODEV;
                goto err_power_down;
        }
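
Masking with 0x7E before the comparison makes bits 0 and 7 of the ID register don't-cares, so close variants of the chip identify correctly. A quick enumeration of which 8-bit values now pass:

#include <stdio.h>

#define MPU3050_CHIP_ID         0x68
#define MPU3050_CHIP_ID_MASK    0x7E

int main(void)
{
        /* Bits 0 and 7 are ignored, so exactly four IDs match:
         * 0x68, 0x69, 0xE8 and 0xE9. */
        for (unsigned int id = 0; id < 256; id++)
                if ((id & MPU3050_CHIP_ID_MASK) == MPU3050_CHIP_ID)
                        printf("0x%02X matches\n", id);
        return 0;
}
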
index cd5bfe39591bb2b2d44b3848cc2d84ef2d4a38f2..dadd921a4a30fdb527faf9e0b8e359ba6fa61bc0 100644 (file)
@@ -320,9 +320,8 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
        const unsigned long *mask;
        unsigned long *trialmask;
 
-       trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength),
-                                 sizeof(*trialmask),
-                                 GFP_KERNEL);
+       trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
+                           sizeof(*trialmask), GFP_KERNEL);
        if (trialmask == NULL)
                return -ENOMEM;
        if (!indio_dev->masklength) {
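
Switching from kmalloc_array() to kcalloc() keeps the overflow-checked sizing but returns zeroed memory, which the bitmap operations on the trial mask assume. The userspace analogue, as a sketch:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* calloc() is the userspace counterpart of kcalloc(): checked
         * n * size multiplication plus cleared memory. */
        size_t nlongs = 2;
        unsigned long *trialmask = calloc(nlongs, sizeof(*trialmask));

        if (!trialmask)
                return 1;
        printf("trialmask[0] = %lu\n", trialmask[0]);   /* always 0 */
        free(trialmask);
        return 0;
}
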
index 4700fd5d8c90a6ebaee08659b4bb17ca9db105a4..9c4d92115504ae093b8990ffdd4bb649f2796228 100644 (file)
@@ -1743,10 +1743,10 @@ EXPORT_SYMBOL(__iio_device_register);
  **/
 void iio_device_unregister(struct iio_dev *indio_dev)
 {
-       mutex_lock(&indio_dev->info_exist_lock);
-
        cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
 
+       mutex_lock(&indio_dev->info_exist_lock);
+
        iio_device_unregister_debugfs(indio_dev);
 
        iio_disable_all_buffers(indio_dev);
index ea0bc6885517b30da5c7a20d6e3805c582f3824b..32cc8fe7902f13dd5f6289ca36f5a8aca84a172b 100644 (file)
@@ -160,6 +160,7 @@ struct ib_uverbs_file {
 
        struct mutex umap_lock;
        struct list_head umaps;
+       struct page *disassociate_page;
 
        struct idr              idr;
        /* spinlock protects write access to idr */
index 70b7d80431a9b935b9a7ffa6fa50be6601f9c4a0..c489f545baaee880df3a040d5f5b88a081bb3a16 100644 (file)
@@ -208,6 +208,9 @@ void ib_uverbs_release_file(struct kref *ref)
                kref_put(&file->async_file->ref,
                         ib_uverbs_release_async_event_file);
        put_device(&file->device->dev);
+
+       if (file->disassociate_page)
+               __free_pages(file->disassociate_page, 0);
        kfree(file);
 }
 
@@ -877,9 +880,50 @@ static void rdma_umap_close(struct vm_area_struct *vma)
        kfree(priv);
 }
 
+/*
+ * Once zap_vma_ptes() has been called, touches to the VMA will come here,
+ * and we return a dummy writable zero page for all the pfns.
+ */
+static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
+{
+       struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
+       struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
+       vm_fault_t ret = 0;
+
+       if (!priv)
+               return VM_FAULT_SIGBUS;
+
+       /* Read-only pages can just use the system zero page. */
+       if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
+               vmf->page = ZERO_PAGE(vmf->address);
+               get_page(vmf->page);
+               return 0;
+       }
+
+       mutex_lock(&ufile->umap_lock);
+       if (!ufile->disassociate_page)
+               ufile->disassociate_page =
+                       alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);
+
+       if (ufile->disassociate_page) {
+               /*
+                * This VMA is forced to always be shared, so we don't have
+                * to worry about COW.
+                */
+               vmf->page = ufile->disassociate_page;
+               get_page(vmf->page);
+       } else {
+               ret = VM_FAULT_SIGBUS;
+       }
+       mutex_unlock(&ufile->umap_lock);
+
+       return ret;
+}
+
 static const struct vm_operations_struct rdma_umap_ops = {
        .open = rdma_umap_open,
        .close = rdma_umap_close,
+       .fault = rdma_umap_fault,
 };
 
 static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
@@ -889,6 +933,9 @@ static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
        struct ib_uverbs_file *ufile = ucontext->ufile;
        struct rdma_umap_priv *priv;
 
+       if (!(vma->vm_flags & VM_SHARED))
+               return ERR_PTR(-EINVAL);
+
        if (vma->vm_end - vma->vm_start != size)
                return ERR_PTR(-EINVAL);
 
@@ -992,7 +1039,9 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
                 * at a time to get the lock ordering right. Typically there
                 * will only be one mm, so no big deal.
                 */
-               down_write(&mm->mmap_sem);
+               down_read(&mm->mmap_sem);
+               if (!mmget_still_valid(mm))
+                       goto skip_mm;
                mutex_lock(&ufile->umap_lock);
                list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
                                          list) {
@@ -1004,10 +1053,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
 
                        zap_vma_ptes(vma, vma->vm_start,
                                     vma->vm_end - vma->vm_start);
-                       vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
                }
                mutex_unlock(&ufile->umap_lock);
-               up_write(&mm->mmap_sem);
+       skip_mm:
+               up_read(&mm->mmap_sem);
                mmput(mm);
        }
 }
index 612f04190ed8386e51ab5f8321464320140c1e77..9784c6c0d2ecfbbca031871f54fcc415602029fc 100644 (file)
@@ -13232,7 +13232,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
        int total_contexts;
        int ret;
        unsigned ngroups;
-       int qos_rmt_count;
+       int rmt_count;
        int user_rmt_reduced;
        u32 n_usr_ctxts;
        u32 send_contexts = chip_send_contexts(dd);
@@ -13294,10 +13294,20 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
                n_usr_ctxts = rcv_contexts - total_contexts;
        }
 
-       /* each user context requires an entry in the RMT */
-       qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
-       if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
-               user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
+       /*
+        * The RMT entries are currently allocated as shown below:
+        * 1. QOS (0 to 128 entries);
+        * 2. FECN for PSM (num_user_contexts + num_vnic_contexts);
+        * 3. VNIC (num_vnic_contexts).
+        * Note that PSM FECN oversubscribes num_vnic_contexts
+        * entries of RMT because both VNIC and PSM could allocate any receive
+        * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
+        * and PSM FECN must reserve an RMT entry for each possible PSM receive
+        * context.
+        */
+       rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
+       if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
+               user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
                dd_dev_err(dd,
                           "RMT size is reducing the number of user receive contexts from %u to %d\n",
                           n_usr_ctxts,
@@ -14285,9 +14295,11 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
        u64 reg;
        int i, idx, regoff, regidx;
        u8 offset;
+       u32 total_cnt;
 
        /* there needs to be enough room in the map table */
-       if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
+       total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
+       if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
                dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
                return;
        }
@@ -14341,7 +14353,7 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd,
        /* add rule 1 */
        add_rsm_rule(dd, RSM_INS_FECN, &rrd);
 
-       rmt->used += dd->num_user_contexts;
+       rmt->used += total_cnt;
 }
 
 /* Initialize RSM for VNIC */
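
With the new accounting, VNIC contexts are counted twice: once for their own RMT rules and once because PSM FECN must cover every dynamically allocated receive context. A worked sizing example with hypothetical counts (NUM_MAP_ENTRIES taken as 256 for the example):

#include <stdio.h>

#define NUM_MAP_ENTRIES 256     /* assumption for the example */

int main(void)
{
        int qos_entries = 128;          /* "0 to 128 entries" per the comment */
        int num_vnic_contexts = 8;      /* hypothetical */
        int n_usr_ctxts = 140;          /* hypothetical */

        int rmt_count = qos_entries + num_vnic_contexts * 2;

        if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES)
                printf("reducing user contexts from %d to %d\n",
                       n_usr_ctxts, NUM_MAP_ENTRIES - rmt_count); /* 140 -> 112 */
        return 0;
}
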
index 9b643c2409cf8bee5c28084b104c2839bc5768aa..eba300330a027acdae1b97c92af5ef07ece6b605 100644 (file)
@@ -898,7 +898,9 @@ void notify_error_qp(struct rvt_qp *qp)
                if (!list_empty(&priv->s_iowait.list) &&
                    !(qp->s_flags & RVT_S_BUSY) &&
                    !(priv->s_flags & RVT_S_BUSY)) {
-                       qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
+                       qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
+                       iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
+                       iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
                        list_del_init(&priv->s_iowait.list);
                        priv->s_iowait.lock = NULL;
                        rvt_put_qp(qp);
index e6726c1ab8669a66722835b43d8b6b3481a11754..5991211d72bdd84d307ab4ebc245a011899eb4bc 100644 (file)
@@ -3088,7 +3088,7 @@ send_last:
                        update_ack_queue(qp, next);
                }
                e = &qp->s_ack_queue[qp->r_head_ack_queue];
-               if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+               if (e->rdma_sge.mr) {
                        rvt_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
@@ -3166,7 +3166,7 @@ send_last:
                        update_ack_queue(qp, next);
                }
                e = &qp->s_ack_queue[qp->r_head_ack_queue];
-               if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+               if (e->rdma_sge.mr) {
                        rvt_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
index fdda33aca77f2031ea2357435c029de0508ffdec..43cbce7a19ea43f42af2464a782221da2ee386bf 100644 (file)
@@ -5017,24 +5017,14 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
            make_tid_rdma_ack(qp, ohdr, ps))
                return 1;
 
-       if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
-               if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
-                       goto bail;
-               /* We are in the error state, flush the work request. */
-               if (qp->s_last == READ_ONCE(qp->s_head))
-                       goto bail;
-               /* If DMAs are in progress, we can't flush immediately. */
-               if (iowait_sdma_pending(&priv->s_iowait)) {
-                       qp->s_flags |= RVT_S_WAIT_DMA;
-                       goto bail;
-               }
-               clear_ahg(qp);
-               wqe = rvt_get_swqe_ptr(qp, qp->s_last);
-               hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
-                                        IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
-               /* will get called again */
-               goto done_free_tx;
-       }
+       /*
+        * Bail out if we can't send data.
+        * Note that this check must be done after the call to
+        * make_tid_rdma_ack() because the responding QP could be in
+        * RTR state where it can send TID RDMA ACK, not TID RDMA WRITE DATA.
+        */
+       if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK))
+               goto bail;
 
        if (priv->s_flags & RVT_S_WAIT_ACK)
                goto bail;
@@ -5144,11 +5134,6 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
        hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2,
                             middle, ps);
        return 1;
-done_free_tx:
-       hfi1_put_txreq(ps->s_txreq);
-       ps->s_txreq = NULL;
-       return 1;
-
 bail:
        hfi1_put_txreq(ps->s_txreq);
 bail_no_tx:
index f1fec56f3ff49047d7ade725d13b43d3dba8bbb6..8e29dbb5b5fbc3bd883384915e76155201572848 100644 (file)
@@ -792,6 +792,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
                idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
                dma_offset = offset = idx_offset * table->obj_size;
        } else {
+               u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
+
                hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
                /* mtt mhop */
                i = mhop.l0_idx;
@@ -803,8 +805,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
                        hem_idx = i;
 
                hem = table->hem[hem_idx];
-               dma_offset = offset = (obj & (table->num_obj - 1)) *
-                                      table->obj_size % mhop.bt_chunk_size;
+               dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size %
+                                      mhop.bt_chunk_size;
                if (mhop.hop_num == 2)
                        dma_offset = offset = 0;
        }
index b09f1cde2ff54ca9522a60d21ba3e82967938574..08be0e4eabcd764e9af0a666cec02fad1e921f76 100644 (file)
@@ -746,7 +746,6 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
        struct hns_roce_hem_table *table;
        dma_addr_t dma_handle;
        __le64 *mtts;
-       u32 s = start_index * sizeof(u64);
        u32 bt_page_size;
        u32 i;
 
@@ -780,7 +779,8 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
                return -EINVAL;
 
        mtts = hns_roce_table_find(hr_dev, table,
-                               mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
+                               mtt->first_seg +
+                               start_index / HNS_ROCE_MTT_ENTRY_PER_SEG,
                                &dma_handle);
        if (!mtts)
                return -ENOMEM;
index 57c76eafef2f8a896ff336af5bfa10954c99f1e4..60cf9f03e9414e98e97f325cc7f0b937af36bcc0 100644 (file)
@@ -274,9 +274,6 @@ void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
        wait_for_completion(&hr_qp->free);
 
        if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
-               if (hr_dev->caps.sccc_entry_sz)
-                       hns_roce_table_put(hr_dev, &qp_table->sccc_table,
-                                          hr_qp->qpn);
                if (hr_dev->caps.trrl_entry_sz)
                        hns_roce_table_put(hr_dev, &qp_table->trrl_table,
                                           hr_qp->qpn);
@@ -536,7 +533,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 
 static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
 {
-       if (attr->qp_type == IB_QPT_XRC_TGT)
+       if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
                return 0;
 
        return 1;
index c5a881172524a6badd8f088850fb60c57120b1ce..337410f4086082d8f57c69afaaf9c3ccb9452de2 100644 (file)
@@ -173,7 +173,12 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
 
                rcu_read_lock();
                in = __in_dev_get_rcu(upper_dev);
-               local_ipaddr = ntohl(in->ifa_list->ifa_address);
+
+               if (!in->ifa_list)
+                       local_ipaddr = 0;
+               else
+                       local_ipaddr = ntohl(in->ifa_list->ifa_address);
+
                rcu_read_unlock();
        } else {
                local_ipaddr = ntohl(ifa->ifa_address);
@@ -185,6 +190,11 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
        case NETDEV_UP:
                /* Fall through */
        case NETDEV_CHANGEADDR:
+
+               /* Just skip if no need to handle ARP cache */
+               if (!local_ipaddr)
+                       break;
+
                i40iw_manage_arp_cache(iwdev,
                                       netdev->dev_addr,
                                       &local_ipaddr,
index 782499abcd9868d63b5f789ee002a0594b00c4d6..2a0b59a4b6ebc3c34ff9ff3308af7e337d2001f3 100644 (file)
@@ -804,8 +804,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
        unsigned long flags;
 
        for (i = 0 ; i < dev->num_ports; i++) {
-               cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
                det = &sriov->alias_guid.ports_guid[i];
+               cancel_delayed_work_sync(&det->alias_guid_work);
                spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
                while (!list_empty(&det->cb_list)) {
                        cb_ctx = list_entry(det->cb_list.next,
index eaa055007f28edfa6ac34e6fd88550ed879f12eb..9e08df7914aa2e142c8926a1230516f5c214d326 100644 (file)
@@ -20,6 +20,7 @@
 
 enum devx_obj_flags {
        DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
+       DEVX_OBJ_FLAGS_DCT = 1 << 1,
 };
 
 struct devx_async_data {
@@ -39,7 +40,10 @@ struct devx_obj {
        u32                     dinlen; /* destroy inbox length */
        u32                     dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
        u32                     flags;
-       struct mlx5_ib_devx_mr  devx_mr;
+       union {
+               struct mlx5_ib_devx_mr  devx_mr;
+               struct mlx5_core_dct    core_dct;
+       };
 };
 
 struct devx_umem {
@@ -347,7 +351,6 @@ static u64 devx_get_obj_id(const void *in)
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
                                        MLX5_GET(arm_rq_in, in, srq_number));
                break;
-       case MLX5_CMD_OP_DRAIN_DCT:
        case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
                obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
                                        MLX5_GET(drain_dct_in, in, dctn));
@@ -618,7 +621,6 @@ static bool devx_is_obj_modify_cmd(const void *in)
        case MLX5_CMD_OP_2RST_QP:
        case MLX5_CMD_OP_ARM_XRC_SRQ:
        case MLX5_CMD_OP_ARM_RQ:
-       case MLX5_CMD_OP_DRAIN_DCT:
        case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
        case MLX5_CMD_OP_ARM_XRQ:
        case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
@@ -1124,7 +1126,11 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
        if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
                devx_cleanup_mkey(obj);
 
-       ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
+       if (obj->flags & DEVX_OBJ_FLAGS_DCT)
+               ret = mlx5_core_destroy_dct(obj->mdev, &obj->core_dct);
+       else
+               ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out,
+                                   sizeof(out));
        if (ib_is_destroy_retryable(ret, why, uobject))
                return ret;
 
@@ -1185,9 +1191,17 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
                devx_set_umem_valid(cmd_in);
        }
 
-       err = mlx5_cmd_exec(dev->mdev, cmd_in,
-                           cmd_in_len,
-                           cmd_out, cmd_out_len);
+       if (opcode == MLX5_CMD_OP_CREATE_DCT) {
+               obj->flags |= DEVX_OBJ_FLAGS_DCT;
+               err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
+                                          cmd_in, cmd_in_len,
+                                          cmd_out, cmd_out_len);
+       } else {
+               err = mlx5_cmd_exec(dev->mdev, cmd_in,
+                                   cmd_in_len,
+                                   cmd_out, cmd_out_len);
+       }
+
        if (err)
                goto obj_free;
 
@@ -1214,7 +1228,11 @@ err_copy:
        if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
                devx_cleanup_mkey(obj);
 obj_destroy:
-       mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
+       if (obj->flags & DEVX_OBJ_FLAGS_DCT)
+               mlx5_core_destroy_dct(obj->mdev, &obj->core_dct);
+       else
+               mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out,
+                             sizeof(out));
 obj_free:
        kfree(obj);
        return err;
index 994c19d012118b11a4a3ab3d3349ffad3002f645..d3dd290ae1b176d609c14937c4ffedc2fe992a20 100644 (file)
@@ -415,10 +415,17 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
                *active_speed = IB_SPEED_EDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
+               *active_width = IB_WIDTH_2X;
+               *active_speed = IB_SPEED_EDR;
+               break;
        case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_HDR;
                break;
+       case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
+               *active_width = IB_WIDTH_4X;
+               *active_speed = IB_SPEED_EDR;
+               break;
        case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
                *active_width = IB_WIDTH_2X;
                *active_speed = IB_SPEED_HDR;
@@ -1112,6 +1119,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
                if (MLX5_CAP_GEN(mdev, qp_packet_based))
                        resp.flags |=
                                MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
+
+               resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
        }
 
        if (field_avail(typeof(resp), sw_parsing_caps,
@@ -2059,6 +2068,7 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
 
        if (vma->vm_flags & VM_WRITE)
                return -EPERM;
+       vma->vm_flags &= ~VM_MAYWRITE;
 
        if (!dev->mdev->clock_info_page)
                return -EOPNOTSUPP;
@@ -2224,19 +2234,18 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
 
                if (vma->vm_flags & VM_WRITE)
                        return -EPERM;
+               vma->vm_flags &= ~VM_MAYWRITE;
 
                /* Don't expose to user-space information it shouldn't have */
                if (PAGE_SIZE > 4096)
                        return -EOPNOTSUPP;
 
-               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                pfn = (dev->mdev->iseg_base +
                       offsetof(struct mlx5_init_seg, internal_timer_h)) >>
                        PAGE_SHIFT;
-               if (io_remap_pfn_range(vma, vma->vm_start, pfn,
-                                      PAGE_SIZE, vma->vm_page_prot))
-                       return -EAGAIN;
-               break;
+               return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
+                                        PAGE_SIZE,
+                                        pgprot_noncached(vma->vm_page_prot));
        case MLX5_IB_MMAP_CLOCK_INFO:
                return mlx5_ib_mmap_clock_info_page(dev, vma, context);
 
index c20bfc41ecf18602cd0f289941d49dd6d37390da..0aa10ebda5d9af2f60f5d98807a2de6ec307ad04 100644 (file)
@@ -585,7 +585,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
        struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
        bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
        bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
-       u64 access_mask = ODP_READ_ALLOWED_BIT;
+       u64 access_mask;
        u64 start_idx, page_mask;
        struct ib_umem_odp *odp;
        size_t size;
@@ -607,6 +607,7 @@ next_mr:
        page_shift = mr->umem->page_shift;
        page_mask = ~(BIT(page_shift) - 1);
        start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
+       access_mask = ODP_READ_ALLOWED_BIT;
 
        if (prefetch && !downgrade && !mr->umem->writable) {
                /* prefetch with write-access must
index 6b1f0e76900b23778a11248b2c74985f8cc4ff7c..8870c350fda0b109cc4cb98c9787fd0452821865 100644 (file)
@@ -1818,13 +1818,16 @@ static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
 
        rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
 
-       if (rcqe_sz == 128) {
-               MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
+       if (init_attr->qp_type == MLX5_IB_QPT_DCT) {
+               if (rcqe_sz == 128)
+                       MLX5_SET(dctc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
+
                return;
        }
 
-       if (init_attr->qp_type != MLX5_IB_QPT_DCT)
-               MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
+       MLX5_SET(qpc, qpc, cs_res,
+                rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
+                                 MLX5_RES_SCAT_DATA32_CQE);
 }
 
 static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
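
After this rework, a DCT QP only ever sets the 64-byte scatter-to-CQE variant (and only for 128-byte CQEs), while a regular QP picks the 64- or 32-byte variant from the CQE size. A sketch of the selection, with illustrative values standing in for the MLX5_RES_SCAT_* constants:

#include <stdio.h>

enum { SCAT_NONE = 0, SCAT_DATA32_CQE = 1, SCAT_DATA64_CQE = 2 }; /* illustrative */

static int cs_res_for(int rcqe_sz, int is_dct)
{
        /* DCTs support only the 64-byte variant; regular QPs choose by size. */
        if (is_dct)
                return rcqe_sz == 128 ? SCAT_DATA64_CQE : SCAT_NONE;
        return rcqe_sz == 128 ? SCAT_DATA64_CQE : SCAT_DATA32_CQE;
}

int main(void)
{
        printf("dct, 128-byte cqe: %d\n", cs_res_for(128, 1)); /* 2 */
        printf("dct,  64-byte cqe: %d\n", cs_res_for(64, 1));  /* 0 */
        printf("qp,   64-byte cqe: %d\n", cs_res_for(64, 0));  /* 1 */
        return 0;
}
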
@@ -3729,6 +3732,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
        } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
                struct mlx5_ib_modify_qp_resp resp = {};
+               u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
                u32 min_resp_len = offsetof(typeof(resp), dctn) +
                                   sizeof(resp.dctn);
 
@@ -3747,7 +3751,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
 
                err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
-                                          MLX5_ST_SZ_BYTES(create_dct_in));
+                                          MLX5_ST_SZ_BYTES(create_dct_in), out,
+                                          sizeof(out));
                if (err)
                        return err;
                resp.dctn = qp->dct.mdct.mqp.qpn;
index 6d8b3e0de57a8e0d3e8071d9aebd0707618cebb4..ec41400fec0c01aa4d2894b7652701a90a190fff 100644 (file)
@@ -1131,6 +1131,8 @@ static void pvrdma_pci_remove(struct pci_dev *pdev)
        pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
        pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
        pvrdma_free_slots(dev);
+       dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
+                         dev->dsrbase);
 
        iounmap(dev->regs);
        kfree(dev->sgid_tbl);
index 7287950434969243335e904aa8045d7588ca2d2f..0bb6e39dd03a730783249586d409be6477a6d317 100644 (file)
@@ -608,11 +608,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
        if (unlikely(mapped_segs == mr->mr.max_segs))
                return -ENOMEM;
 
-       if (mr->mr.length == 0) {
-               mr->mr.user_base = addr;
-               mr->mr.iova = addr;
-       }
-
        m = mapped_segs / RVT_SEGSZ;
        n = mapped_segs % RVT_SEGSZ;
        mr->mr.map[m]->segs[n].vaddr = (void *)addr;
@@ -630,17 +625,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
  * @sg_nents: number of entries in sg
  * @sg_offset: offset in bytes into sg
  *
+ * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
+ *
  * Return: number of sg elements mapped to the memory region
  */
 int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
                  int sg_nents, unsigned int *sg_offset)
 {
        struct rvt_mr *mr = to_imr(ibmr);
+       int ret;
 
        mr->mr.length = 0;
        mr->mr.page_shift = PAGE_SHIFT;
-       return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
-                             rvt_set_page);
+       ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
+       mr->mr.user_base = ibmr->iova;
+       mr->mr.iova = ibmr->iova;
+       mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
+       mr->mr.length = (size_t)ibmr->length;
+       return ret;
 }
 
 /**
@@ -671,6 +673,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
        ibmr->rkey = key;
        mr->mr.lkey = key;
        mr->mr.access_flags = access;
+       mr->mr.iova = ibmr->iova;
        atomic_set(&mr->mr.lkey_invalid, 0);
 
        return 0;
index a878351f16439859e3931ec71506c705eb9a6f6e..52d7f55fca329c09c9788cb8bcd91509a5f07426 100644 (file)
@@ -420,7 +420,7 @@ config KEYBOARD_MPR121
 
 config KEYBOARD_SNVS_PWRKEY
        tristate "IMX SNVS Power Key Driver"
-       depends on SOC_IMX6SX || SOC_IMX7D
+       depends on ARCH_MXC || COMPILE_TEST
        depends on OF
        help
          This is the snvs powerkey driver for the Freescale i.MX application
index effb63205d3d7783e8e4e598407332892ef6aae2..4c67cf30a5d9ab14bff5f5c53d289ba347d241f4 100644 (file)
@@ -148,6 +148,9 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
                return error;
        }
 
+       pdata->input = input;
+       platform_set_drvdata(pdev, pdata);
+
        error = devm_request_irq(&pdev->dev, pdata->irq,
                               imx_snvs_pwrkey_interrupt,
                               0, pdev->name, pdev);
@@ -163,9 +166,6 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
                return error;
        }
 
-       pdata->input = input;
-       platform_set_drvdata(pdev, pdata);
-
        device_init_wakeup(&pdev->dev, pdata->wakeup);
 
        return 0;
index 628ef617bb2f7f51301d5b422905140fa53b1c1a..f9525d6f0bfe810c9ab1c2bd0a2a971f2e9695b4 100644 (file)
@@ -1339,21 +1339,46 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN0600", 0 },
        { "ELAN0601", 0 },
        { "ELAN0602", 0 },
+       { "ELAN0603", 0 },
+       { "ELAN0604", 0 },
        { "ELAN0605", 0 },
+       { "ELAN0606", 0 },
+       { "ELAN0607", 0 },
        { "ELAN0608", 0 },
        { "ELAN0609", 0 },
        { "ELAN060B", 0 },
        { "ELAN060C", 0 },
+       { "ELAN060F", 0 },
+       { "ELAN0610", 0 },
        { "ELAN0611", 0 },
        { "ELAN0612", 0 },
+       { "ELAN0615", 0 },
+       { "ELAN0616", 0 },
        { "ELAN0617", 0 },
        { "ELAN0618", 0 },
+       { "ELAN0619", 0 },
+       { "ELAN061A", 0 },
+       { "ELAN061B", 0 },
        { "ELAN061C", 0 },
        { "ELAN061D", 0 },
        { "ELAN061E", 0 },
+       { "ELAN061F", 0 },
        { "ELAN0620", 0 },
        { "ELAN0621", 0 },
        { "ELAN0622", 0 },
+       { "ELAN0623", 0 },
+       { "ELAN0624", 0 },
+       { "ELAN0625", 0 },
+       { "ELAN0626", 0 },
+       { "ELAN0627", 0 },
+       { "ELAN0628", 0 },
+       { "ELAN0629", 0 },
+       { "ELAN062A", 0 },
+       { "ELAN062B", 0 },
+       { "ELAN062C", 0 },
+       { "ELAN062D", 0 },
+       { "ELAN0631", 0 },
+       { "ELAN0632", 0 },
        { "ELAN1000", 0 },
        { }
 };
index fc3ab93b7aea454475ee324eecee91470c4a9dc3..7fb358f961957507969db706c780459b937d2ba0 100644 (file)
@@ -860,7 +860,7 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
 
        error = rmi_register_function(fn);
        if (error)
-               goto err_put_fn;
+               return error;
 
        if (pdt->function_number == 0x01)
                data->f01_container = fn;
@@ -870,10 +870,6 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
        list_add_tail(&fn->node, &data->function_list);
 
        return RMI_SCAN_CONTINUE;
-
-err_put_fn:
-       put_device(&fn->dev);
-       return error;
 }
 
 void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
index df64d6aed4f7e10b8eb78eb78619a15d7bcaaf56..93901ebd122a504e7e96c35a17c100ae1ea607e3 100644 (file)
@@ -1230,7 +1230,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
        }
 
        rc = f11_write_control_regs(fn, &f11->sens_query,
-                          &f11->dev_controls, fn->fd.query_base_addr);
+                          &f11->dev_controls, fn->fd.control_base_addr);
        if (rc)
                dev_warn(&fn->dev, "Failed to write control registers\n");
 
index b319e51c379bd664999e7d710c5e1240cc21a42d..f7cdd2ab7f11f6cba22003d4cf71576b4bc77b72 100644 (file)
@@ -2608,7 +2608,12 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 
        /* Everything is mapped - write the right values into s->dma_address */
        for_each_sg(sglist, s, nelems, i) {
-               s->dma_address += address + s->offset;
+               /*
+                * Add in the remaining piece of the scatter-gather offset that
+                * was masked out when we were determining the physical address
+                * via (sg_phys(s) & PAGE_MASK) earlier.
+                */
+               s->dma_address += address + (s->offset & ~PAGE_MASK);
                s->dma_length   = s->length;
        }
 
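The fix adds back only the sub-page part of s->offset: the page-aligned part is already reflected in which pages were mapped from sg_phys(s) & PAGE_MASK, so adding the full offset would count it twice whenever it crosses a page boundary. The arithmetic, with hypothetical addresses:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        uint64_t address = 0x100000;    /* hypothetical mapped IOVA base */
        unsigned long offset = 5000;    /* sg offset crossing a page boundary */

        uint64_t wrong = address + offset;                  /* 0x101388 */
        uint64_t right = address + (offset & ~PAGE_MASK);   /* 0x100388 */

        printf("full offset:     0x%llx\n", (unsigned long long)wrong);
        printf("sub-page offset: 0x%llx\n", (unsigned long long)right);
        return 0;
}
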
@@ -3164,21 +3169,24 @@ static void amd_iommu_get_resv_regions(struct device *dev,
                return;
 
        list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+               int type, prot = 0;
                size_t length;
-               int prot = 0;
 
                if (devid < entry->devid_start || devid > entry->devid_end)
                        continue;
 
+               type   = IOMMU_RESV_DIRECT;
                length = entry->address_end - entry->address_start;
                if (entry->prot & IOMMU_PROT_IR)
                        prot |= IOMMU_READ;
                if (entry->prot & IOMMU_PROT_IW)
                        prot |= IOMMU_WRITE;
+               if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
+                       /* Exclusion range */
+                       type = IOMMU_RESV_RESERVED;
 
                region = iommu_alloc_resv_region(entry->address_start,
-                                                length, prot,
-                                                IOMMU_RESV_DIRECT);
+                                                length, prot, type);
                if (!region) {
                        dev_err(dev, "Out of memory allocating dm-regions\n");
                        return;
index f773792d77fd533be53ec3796692c140f77121d7..ff40ba758cf365e89ddeb2270971e1536554b817 100644 (file)
@@ -359,7 +359,7 @@ static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
 static void iommu_set_exclusion_range(struct amd_iommu *iommu)
 {
        u64 start = iommu->exclusion_start & PAGE_MASK;
-       u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
+       u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
        u64 entry;
 
        if (!iommu->exclusion_start)
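
Subtracting one before masking turns the exclusive end of the range into the address of its last page, which is what an inclusive limit register expects; without it, the first page after the range would be programmed as the limit. For example:

#include <stdio.h>
#include <stdint.h>

#define PAGE_MASK (~0xFFFULL)

int main(void)
{
        uint64_t start = 0x10000, length = 0x10000;     /* hypothetical 64 KiB */

        uint64_t old_limit = (start + length) & PAGE_MASK;      /* 0x20000: one page too far */
        uint64_t new_limit = (start + length - 1) & PAGE_MASK;  /* 0x1f000: last page inside */

        printf("old: 0x%llx, new: 0x%llx\n",
               (unsigned long long)old_limit, (unsigned long long)new_limit);
        return 0;
}
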
@@ -2013,6 +2013,9 @@ static int __init init_unity_map_range(struct ivmd_header *m)
        if (e == NULL)
                return -ENOMEM;
 
+       if (m->flags & IVMD_FLAG_EXCL_RANGE)
+               init_exclusion_range(m);
+
        switch (m->type) {
        default:
                kfree(e);
@@ -2059,9 +2062,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
 
        while (p < end) {
                m = (struct ivmd_header *)p;
-               if (m->flags & IVMD_FLAG_EXCL_RANGE)
-                       init_exclusion_range(m);
-               else if (m->flags & IVMD_FLAG_UNITY_MAP)
+               if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
                        init_unity_map_range(m);
 
                p += m->length;
index eae0741f72dce2fcea771e415a986b062515c7fc..87965e4d964771bd2352d6254bba299f43734107 100644 (file)
 #define IOMMU_PROT_IR 0x01
 #define IOMMU_PROT_IW 0x02
 
+#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE        (1 << 2)
+
 /* IOMMU capabilities */
 #define IOMMU_CAP_IOTLB   24
 #define IOMMU_CAP_NPCACHE 26
index 87274b54febd0eb3cfb1dd3358609fa037c90095..28cb713d728ceef9eb7f37caa746a546617e1dbb 100644 (file)
@@ -1538,6 +1538,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
        u32 pmen;
        unsigned long flags;
 
+       if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
+               return;
+
        raw_spin_lock_irqsave(&iommu->register_lock, flags);
        pmen = readl(iommu->reg + DMAR_PMEN_REG);
        pmen &= ~DMA_PMEN_EPM;
@@ -5332,7 +5335,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
 
        ctx_lo = context[0].lo;
 
-       sdev->did = domain->iommu_did[iommu->seq_id];
+       sdev->did = FLPT_DEFAULT_DID;
        sdev->sid = PCI_DEVID(info->bus, info->devfn);
 
        if (!(ctx_lo & CONTEXT_PASIDE)) {
index f101afc315abb8da199fa1a9d2bd4df3d44d82e9..9a8a8870e26727e7398afffd5286860b0e8581d9 100644 (file)
 
 #define ARM_V7S_TCR_PD1                        BIT(5)
 
+#ifdef CONFIG_ZONE_DMA32
+#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
+#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
+#else
+#define ARM_V7S_TABLE_GFP_DMA GFP_DMA
+#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA
+#endif
+
 typedef u32 arm_v7s_iopte;
 
 static bool selftest_running;
@@ -197,13 +205,16 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
        void *table = NULL;
 
        if (lvl == 1)
-               table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
+               table = (void *)__get_free_pages(
+                       __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
        else if (lvl == 2)
-               table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA);
+               table = kmem_cache_zalloc(data->l2_tables, gfp);
        phys = virt_to_phys(table);
-       if (phys != (arm_v7s_iopte)phys)
+       if (phys != (arm_v7s_iopte)phys) {
                /* Doesn't fit in PTE */
+               dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
                goto out_free;
+       }
        if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
                dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma))
@@ -733,7 +744,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
        data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
                                            ARM_V7S_TABLE_SIZE(2),
                                            ARM_V7S_TABLE_SIZE(2),
-                                           SLAB_CACHE_DMA, NULL);
+                                           ARM_V7S_TABLE_SLAB_FLAGS, NULL);
        if (!data->l2_tables)
                goto out_free_data;
 
index 33a982e33716369b7d2bf5885ef5a8ed22f04414..109de67d5d727c227d3970b2879edd60d6478357 100644 (file)
@@ -1105,10 +1105,12 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
 
                dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
                if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
-                       dev_warn(dev,
-                                "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
-                                iommu_def_domain_type);
                        dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
+                       if (dom) {
+                               dev_warn(dev,
+                                        "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
+                                        iommu_def_domain_type);
+                       }
                }
 
                group->default_domain = dom;
index f8d3ba2475237f4477994a7c8b8b1cae0cfe3310..2de8122e218fde5856867252679b0b95682f3619 100644 (file)
@@ -207,8 +207,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
                curr_iova = rb_entry(curr, struct iova, node);
        } while (curr && new_pfn <= curr_iova->pfn_hi);
 
-       if (limit_pfn < size || new_pfn < iovad->start_pfn)
+       if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+               iovad->max32_alloc_size = size;
                goto iova32_full;
+       }
 
        /* pfn_lo will point to size aligned address if size_aligned is set */
        new->pfn_lo = new_pfn;
@@ -222,7 +224,6 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
        return 0;
 
 iova32_full:
-       iovad->max32_alloc_size = size;
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return -ENOMEM;
 }
index aa729078463601464e0c0c90d0c319c83f82214a..0390603170b405862ac63b35310f64e3d5743b7a 100644 (file)
 #define AR71XX_RESET_REG_MISC_INT_ENABLE       4
 
 #define ATH79_MISC_IRQ_COUNT                   32
+#define ATH79_MISC_PERF_IRQ                    5
+
+static int ath79_perfcount_irq;
+
+int get_c0_perfcount_int(void)
+{
+       return ath79_perfcount_irq;
+}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 static void ath79_misc_irq_handler(struct irq_desc *desc)
 {
@@ -113,6 +122,8 @@ static void __init ath79_misc_intc_domain_init(
 {
        void __iomem *base = domain->host_data;
 
+       ath79_perfcount_irq = irq_create_mapping(domain, ATH79_MISC_PERF_IRQ);
+
        /* Disable and clear all interrupts */
        __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
        __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
index 83364fedbf0ab57962a7b325663ab910d6173a0a..5e4ca139e4eacaa70eb5cba5b7ce4124aed93381 100644 (file)
@@ -275,14 +275,14 @@ out_free:
        return ret;
 }
 
-int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
+static int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
        struct device_node *parent)
 {
        return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init);
 }
 IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init);
 
-int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
+static int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
        struct device_node *parent)
 {
        return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init);
index 2dd1ff0cf558050e8ae9cfa7ea81c26d12a012e5..7577755bdcf4f38588438634c7484999a51d927c 100644 (file)
@@ -1482,7 +1482,7 @@ static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
        ra = container_of(a, struct lpi_range, entry);
        rb = container_of(b, struct lpi_range, entry);
 
-       return rb->base_id - ra->base_id;
+       return ra->base_id - rb->base_id;
 }
 
 static void merge_lpi_ranges(void)
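
Swapping the operands makes the comparator sort ranges by ascending base_id, which merge_lpi_ranges() relies on when it coalesces adjacent ranges. A qsort() sketch of the same ordering:

#include <stdio.h>
#include <stdlib.h>

/* Ascending comparator, matching the fixed lpi_range_cmp(): a - b puts the
 * smallest base_id first. */
static int cmp_base(const void *a, const void *b)
{
        const int *ra = a, *rb = b;

        return *ra - *rb;
}

int main(void)
{
        int base_ids[] = { 8192, 1024, 4096 };

        qsort(base_ids, 3, sizeof(base_ids[0]), cmp_base);
        for (int i = 0; i < 3; i++)
                printf("%d\n", base_ids[i]);    /* 1024 4096 8192 */
        return 0;
}
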
index ba2a37a27a54ff9fc3abab3bf98da61d64965b06..fd3110c171bad165737c5bb274f1adda7dbf6daa 100644 (file)
@@ -1089,11 +1089,10 @@ static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
 #endif
 }
 
-static int gic_init_bases(struct gic_chip_data *gic, int irq_start,
+static int gic_init_bases(struct gic_chip_data *gic,
                          struct fwnode_handle *handle)
 {
-       irq_hw_number_t hwirq_base;
-       int gic_irqs, irq_base, ret;
+       int gic_irqs, ret;
 
        if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
                /* Frankein-GIC without banked registers... */
@@ -1145,28 +1144,21 @@ static int gic_init_bases(struct gic_chip_data *gic, int irq_start,
        } else {                /* Legacy support */
                /*
                 * For primary GICs, skip over SGIs.
-                * For secondary GICs, skip over PPIs, too.
+                * No secondary GIC support whatsoever.
                 */
-               if (gic == &gic_data[0] && (irq_start & 31) > 0) {
-                       hwirq_base = 16;
-                       if (irq_start != -1)
-                               irq_start = (irq_start & ~31) + 16;
-               } else {
-                       hwirq_base = 32;
-               }
+               int irq_base;
 
-               gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
+               gic_irqs -= 16; /* calculate # of irqs to allocate */
 
-               irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
+               irq_base = irq_alloc_descs(16, 16, gic_irqs,
                                           numa_node_id());
                if (irq_base < 0) {
-                       WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
-                            irq_start);
-                       irq_base = irq_start;
+                       WARN(1, "Cannot allocate irq_descs @ IRQ16, assuming pre-allocated\n");
+                       irq_base = 16;
                }
 
                gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base,
-                                       hwirq_base, &gic_irq_domain_ops, gic);
+                                                   16, &gic_irq_domain_ops, gic);
        }
 
        if (WARN_ON(!gic->domain)) {
@@ -1195,7 +1187,6 @@ error:
 }
 
 static int __init __gic_init_bases(struct gic_chip_data *gic,
-                                  int irq_start,
                                   struct fwnode_handle *handle)
 {
        char *name;
@@ -1231,32 +1222,28 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
                gic_init_chip(gic, NULL, name, false);
        }
 
-       ret = gic_init_bases(gic, irq_start, handle);
+       ret = gic_init_bases(gic, handle);
        if (ret)
                kfree(name);
 
        return ret;
 }
 
-void __init gic_init(unsigned int gic_nr, int irq_start,
-                    void __iomem *dist_base, void __iomem *cpu_base)
+void __init gic_init(void __iomem *dist_base, void __iomem *cpu_base)
 {
        struct gic_chip_data *gic;
 
-       if (WARN_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR))
-               return;
-
        /*
         * Non-DT/ACPI systems won't run a hypervisor, so let's not
         * bother with these...
         */
        static_branch_disable(&supports_deactivate_key);
 
-       gic = &gic_data[gic_nr];
+       gic = &gic_data[0];
        gic->raw_dist_base = dist_base;
        gic->raw_cpu_base = cpu_base;
 
-       __gic_init_bases(gic, irq_start, NULL);
+       __gic_init_bases(gic, NULL);
 }
 
 static void gic_teardown(struct gic_chip_data *gic)
@@ -1399,7 +1386,7 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
        if (ret)
                return ret;
 
-       ret = gic_init_bases(*gic, -1, &dev->of_node->fwnode);
+       ret = gic_init_bases(*gic, &dev->of_node->fwnode);
        if (ret) {
                gic_teardown(*gic);
                return ret;
@@ -1459,7 +1446,7 @@ gic_of_init(struct device_node *node, struct device_node *parent)
        if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
                static_branch_disable(&supports_deactivate_key);
 
-       ret = __gic_init_bases(gic, -1, &node->fwnode);
+       ret = __gic_init_bases(gic, &node->fwnode);
        if (ret) {
                gic_teardown(gic);
                return ret;
@@ -1650,7 +1637,7 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
                return -ENOMEM;
        }
 
-       ret = __gic_init_bases(gic, -1, domain_handle);
+       ret = __gic_init_bases(gic, domain_handle);
        if (ret) {
                pr_err("Failed to initialise GIC\n");
                irq_domain_free_fwnode(domain_handle);
index d1098f4da6a4c567adbcb392ab269bdf14c419b1..88df3d00052c00b3be13292215a679b67d58f776 100644 (file)
@@ -169,8 +169,12 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
 
        raw_spin_lock_init(&data->lock);
 
-       of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
-       of_property_read_u32(np, "fsl,channel", &data->channel);
+       ret = of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
+       if (ret)
+               return ret;
+       ret = of_property_read_u32(np, "fsl,channel", &data->channel);
+       if (ret)
+               return ret;
 
        /*
         * There is one output irq for each group of 64 inputs.
index 86b72fbd3b45dd63f495de36435410997740f473..353111a104133a0faeadf5d46e3e2f1bbf617d27 100644 (file)
@@ -130,6 +130,7 @@ static int __init ls1x_intc_of_init(struct device_node *node,
                                             NULL);
        if (!priv->domain) {
                pr_err("ls1x-irq: cannot add IRQ domain\n");
+               err = -ENOMEM;
                goto out_iounmap;
        }
 
index 567b29c476081056232f13eed15a27c8d8d4fa64..98b6e1d4b1a68cf8a247f4c75a67d09fd11b6b08 100644 (file)
@@ -161,6 +161,9 @@ static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
        void __iomem *base = d->chip_data;
        u32 val;
 
+       if (!msg->address_lo && !msg->address_hi)
+               return;
        base += get_mbigen_vec_reg(d->hwirq);
        val = readl_relaxed(base);
 
index 3496b61a312aef87cc9668189fd9047a844e8ca3..8eed478f3b7e5d1fd7de2a20dd7c456350e5b0d5 100644 (file)
@@ -179,7 +179,7 @@ static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
        return 0;
 }
 
-const struct irq_domain_ops mmp_irq_domain_ops = {
+static const struct irq_domain_ops mmp_irq_domain_ops = {
        .map            = mmp_irq_domain_map,
        .xlate          = mmp_irq_domain_xlate,
 };
index add4c9c934c8abda564b25904dc7b9f479ca7afc..18832ccc8ff8751d2562f56e53466b6b5e5b84fb 100644 (file)
@@ -478,7 +478,7 @@ dispose_irq:
        return ret;
 }
 
-struct mvebu_sei_caps mvebu_sei_ap806_caps = {
+static struct mvebu_sei_caps mvebu_sei_ap806_caps = {
        .ap_range = {
                .first = 0,
                .size = 21,
index a93296b9b45debecfb723e780f3f381b15660d2e..7bd1d4cb2e194679078ca67f782a48505641d5e7 100644 (file)
@@ -716,7 +716,6 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
        const struct stm32_exti_bank *stm32_bank;
        struct stm32_exti_chip_data *chip_data;
        void __iomem *base = h_data->base;
-       u32 irqs_mask;
 
        stm32_bank = h_data->drv_data->exti_banks[bank_idx];
        chip_data = &h_data->chips_data[bank_idx];
@@ -725,21 +724,12 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
 
        raw_spin_lock_init(&chip_data->rlock);
 
-       /* Determine number of irqs supported */
-       writel_relaxed(~0UL, base + stm32_bank->rtsr_ofst);
-       irqs_mask = readl_relaxed(base + stm32_bank->rtsr_ofst);
-
        /*
         * This IP has no reset, so after a hot reboot we should
         * clear the registers to avoid residue
         */
        writel_relaxed(0, base + stm32_bank->imr_ofst);
        writel_relaxed(0, base + stm32_bank->emr_ofst);
-       writel_relaxed(0, base + stm32_bank->rtsr_ofst);
-       writel_relaxed(0, base + stm32_bank->ftsr_ofst);
-       writel_relaxed(~0UL, base + stm32_bank->rpr_ofst);
-       if (stm32_bank->fpr_ofst != UNDEF_REG)
-               writel_relaxed(~0UL, base + stm32_bank->fpr_ofst);
 
        pr_info("%pOF: bank%d\n", h_data->node, bank_idx);
 
index 4d85645c87f78721a83fcef94be1feb3bce8c094..0928fd1f0e0c134943c7dab3aeec7d0e5699eaf5 100644 (file)
@@ -4365,7 +4365,8 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
        if (m->clock2)
                test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
 
-       if (ent->device == 0xB410) {
+       if (ent->vendor == PCI_VENDOR_ID_DIGIUM &&
+           ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) {
                test_and_set_bit(HFC_CHIP_B410P, &hc->chip);
                test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
                test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
index 4ab8b1b6608f7136365f91d713f65647a8271296..a14e35d405387d4dc43bf672139c773bf4b05d2f 100644 (file)
@@ -710,10 +710,10 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
        struct sock *sk = sock->sk;
        int err = 0;
 
-       if (!maddr || maddr->family != AF_ISDN)
+       if (addr_len < sizeof(struct sockaddr_mISDN))
                return -EINVAL;
 
-       if (addr_len < sizeof(struct sockaddr_mISDN))
+       if (!maddr || maddr->family != AF_ISDN)
                return -EINVAL;
 
        lock_sock(sk);
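
Reordering the two checks means the length is validated before any field of the address is read; with a too-short buffer, the family test would otherwise read bytes the caller never supplied. A minimal userspace sketch (AF_ISDN is 34 on Linux; the struct here is a simplified stand-in):

#include <stdio.h>
#include <stddef.h>

#define AF_ISDN 34

struct sockaddr_mISDN_demo {    /* simplified stand-in */
        unsigned short family;
        unsigned char channel;
};

static int bind_check(const void *addr, size_t addr_len)
{
        const struct sockaddr_mISDN_demo *maddr = addr;

        /* Length first: only then is dereferencing maddr safe. */
        if (addr_len < sizeof(*maddr))
                return -1;
        if (!maddr || maddr->family != AF_ISDN)
                return -1;
        return 0;
}

int main(void)
{
        struct sockaddr_mISDN_demo sa = { .family = AF_ISDN };

        printf("short buffer: %d\n", bind_check(&sa, 1));           /* -1 */
        printf("full buffer:  %d\n", bind_check(&sa, sizeof(sa)));  /* 0 */
        return 0;
}
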
index 7fea18b0c15d115178c874163232fa6c2e3706ce..7cb4d685a1f107f335c7cf475d06aae6688ab6bc 100644 (file)
@@ -513,6 +513,7 @@ static int pca9532_probe(struct i2c_client *client,
        const struct i2c_device_id *id)
 {
        int devid;
+       const struct of_device_id *of_id;
        struct pca9532_data *data = i2c_get_clientdata(client);
        struct pca9532_platform_data *pca9532_pdata =
                        dev_get_platdata(&client->dev);
@@ -528,8 +529,11 @@ static int pca9532_probe(struct i2c_client *client,
                        dev_err(&client->dev, "no platform data\n");
                        return -EINVAL;
                }
-               devid = (int)(uintptr_t)of_match_device(
-                       of_pca9532_leds_match, &client->dev)->data;
+               of_id = of_match_device(of_pca9532_leds_match,
+                               &client->dev);
+               if (unlikely(!of_id))
+                       return -EINVAL;
+               devid = (int)(uintptr_t) of_id->data;
        } else {
                devid = id->driver_data;
        }
index 3dd3ed46d473b673fd916085044bd3c3b38b0759..136f86a1627d18cf396990ca1a4122d17578d0af 100644 (file)
@@ -122,7 +122,8 @@ static ssize_t device_name_store(struct device *dev,
                trigger_data->net_dev = NULL;
        }
 
-       strncpy(trigger_data->device_name, buf, size);
+       memcpy(trigger_data->device_name, buf, size);
+       trigger_data->device_name[size] = 0;
        if (size > 0 && trigger_data->device_name[size - 1] == '\n')
                trigger_data->device_name[size - 1] = 0;
 
@@ -301,11 +302,11 @@ static int netdev_trig_notify(struct notifier_block *nb,
                container_of(nb, struct led_netdev_data, notifier);
 
        if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
-           && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
-           && evt != NETDEV_CHANGENAME)
+           && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER)
                return NOTIFY_DONE;
 
-       if (strcmp(dev->name, trigger_data->device_name))
+       if (!(dev == trigger_data->net_dev ||
+             (evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name))))
                return NOTIFY_DONE;
 
        cancel_delayed_work_sync(&trigger_data->work);
@@ -320,12 +321,9 @@ static int netdev_trig_notify(struct notifier_block *nb,
                dev_hold(dev);
                trigger_data->net_dev = dev;
                break;
-       case NETDEV_CHANGENAME:
        case NETDEV_UNREGISTER:
-               if (trigger_data->net_dev) {
-                       dev_put(trigger_data->net_dev);
-                       trigger_data->net_dev = NULL;
-               }
+               dev_put(trigger_data->net_dev);
+               trigger_data->net_dev = NULL;
                break;
        case NETDEV_UP:
        case NETDEV_CHANGE:
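Taken together, the notifier hunks above switch the trigger from matching interfaces by name to matching by device identity: NETDEV_CHANGENAME no longer detaches the LED, and the configured name is only consulted when a device first registers. A condensed model of the new matching rule (types trimmed to the essentials):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct net_device { char name[16]; };

    struct trigger {
            const struct net_device *net_dev;  /* reference held while bound */
            char device_name[16];
    };

    /* Pointer identity survives renames; the name is only a bootstrap
     * used to recognize the device when it registers. */
    static bool event_is_for_us(const struct trigger *t,
                                const struct net_device *dev, bool registering)
    {
            return dev == t->net_dev ||
                   (registering && !strcmp(dev->name, t->device_name));
    }

    int main(void)
    {
            struct net_device dev = { "eth0" };
            struct trigger t = { &dev, "eth0" };

            strcpy(dev.name, "lan0");          /* interface renamed */
            printf("%d\n", event_is_for_us(&t, &dev, false)); /* still 1 */
            return 0;
    }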
index 3789185144dae34241911d3c42308e6cbad650f2..0b7d5fb4548dcd8720f86e98a2567e3d387a061d 100644 (file)
@@ -231,14 +231,14 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
        struct pblk_sec_meta *meta;
        struct bio *new_bio = rqd->bio;
        struct bio *bio = pr_ctx->orig_bio;
-       struct bio_vec src_bv, dst_bv;
        void *meta_list = rqd->meta_list;
-       int bio_init_idx = pr_ctx->bio_init_idx;
        unsigned long *read_bitmap = pr_ctx->bitmap;
+       struct bvec_iter orig_iter = BVEC_ITER_ALL_INIT;
+       struct bvec_iter new_iter = BVEC_ITER_ALL_INIT;
        int nr_secs = pr_ctx->orig_nr_secs;
        int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
        void *src_p, *dst_p;
-       int hole, i;
+       int bit, i;
 
        if (unlikely(nr_holes == 1)) {
                struct ppa_addr ppa;
@@ -257,33 +257,39 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
 
        /* Fill the holes in the original bio */
        i = 0;
-       hole = find_first_zero_bit(read_bitmap, nr_secs);
-       do {
-               struct pblk_line *line;
+       for (bit = 0; bit < nr_secs; bit++) {
+               if (!test_bit(bit, read_bitmap)) {
+                       struct bio_vec dst_bv, src_bv;
+                       struct pblk_line *line;
 
-               line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
-               kref_put(&line->ref, pblk_line_put);
+                       line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
+                       kref_put(&line->ref, pblk_line_put);
 
-               meta = pblk_get_meta(pblk, meta_list, hole);
-               meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
+                       meta = pblk_get_meta(pblk, meta_list, bit);
+                       meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
 
-               src_bv = new_bio->bi_io_vec[i++];
-               dst_bv = bio->bi_io_vec[bio_init_idx + hole];
+                       dst_bv = bio_iter_iovec(bio, orig_iter);
+                       src_bv = bio_iter_iovec(new_bio, new_iter);
 
-               src_p = kmap_atomic(src_bv.bv_page);
-               dst_p = kmap_atomic(dst_bv.bv_page);
+                       src_p = kmap_atomic(src_bv.bv_page);
+                       dst_p = kmap_atomic(dst_bv.bv_page);
 
-               memcpy(dst_p + dst_bv.bv_offset,
-                       src_p + src_bv.bv_offset,
-                       PBLK_EXPOSED_PAGE_SIZE);
+                       memcpy(dst_p + dst_bv.bv_offset,
+                               src_p + src_bv.bv_offset,
+                               PBLK_EXPOSED_PAGE_SIZE);
 
-               kunmap_atomic(src_p);
-               kunmap_atomic(dst_p);
+                       kunmap_atomic(src_p);
+                       kunmap_atomic(dst_p);
 
-               mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
+                       flush_dcache_page(dst_bv.bv_page);
+                       mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
 
-               hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
-       } while (hole < nr_secs);
+                       bio_advance_iter(new_bio, &new_iter,
+                                       PBLK_EXPOSED_PAGE_SIZE);
+                       i++;
+               }
+               bio_advance_iter(bio, &orig_iter, PBLK_EXPOSED_PAGE_SIZE);
+       }
 
        bio_put(new_bio);
        kfree(pr_ctx);
index 1ecef76225a187174b0f9c48d1835eea769b8c7d..2a48ea3f1b30d4adfc6581dff3d1cfe1a088b86a 100644 (file)
@@ -150,7 +150,7 @@ struct dm_buffer {
        void (*end_io)(struct dm_buffer *, blk_status_t);
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 #define MAX_STACK 10
-       struct stack_trace stack_trace;
+       unsigned int stack_len;
        unsigned long stack_entries[MAX_STACK];
 #endif
 };
@@ -232,11 +232,7 @@ static DEFINE_MUTEX(dm_bufio_clients_lock);
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 static void buffer_record_stack(struct dm_buffer *b)
 {
-       b->stack_trace.nr_entries = 0;
-       b->stack_trace.max_entries = MAX_STACK;
-       b->stack_trace.entries = b->stack_entries;
-       b->stack_trace.skip = 2;
-       save_stack_trace(&b->stack_trace);
+       b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
 }
 #endif
 
@@ -438,7 +434,7 @@ static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
        adjust_total_allocated(b->data_mode, (long)c->block_size);
 
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
-       memset(&b->stack_trace, 0, sizeof(b->stack_trace));
+       b->stack_len = 0;
 #endif
        return b;
 }
@@ -1520,8 +1516,9 @@ static void drop_buffers(struct dm_bufio_client *c)
                        DMERR("leaked buffer %llx, hold count %u, list %d",
                              (unsigned long long)b->block, b->hold_count, i);
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
-                       print_stack_trace(&b->stack_trace, 1);
-                       b->hold_count = 0; /* mark unclaimed to avoid BUG_ON below */
+                       stack_trace_print(b->stack_entries, b->stack_len, 1);
+                       /* mark unclaimed to avoid BUG_ON below */
+                       b->hold_count = 0;
 #endif
                }
 
index 95c6d86ab5e8deaa0d5975240f708fe165061e6e..c4ef1fceead6ee1ba83bfb0e54f2eefae26bd48b 100644 (file)
@@ -115,6 +115,7 @@ struct mapped_device {
        struct srcu_struct io_barrier;
 };
 
+void disable_discard(struct mapped_device *md);
 void disable_write_same(struct mapped_device *md);
 void disable_write_zeroes(struct mapped_device *md);
 
index b53f30f16b4d4f2c02bf9c15e5b801234b8cd9ae..4b76f84424c3c1a73ef3bc3b9605a1486e3bf88b 100644 (file)
@@ -36,7 +36,7 @@ struct dm_device {
        struct list_head list;
 };
 
-const char *dm_allowed_targets[] __initconst = {
+const char * const dm_allowed_targets[] __initconst = {
        "crypt",
        "delay",
        "linear",
index d57d997a52c81cfe6c68918520316f993aeebc44..7c678f50aaa37a5612ea23bed69e0fb31526224e 100644 (file)
@@ -913,7 +913,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
 static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
 {
        return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
-              range2->logical_sector + range2->n_sectors > range2->logical_sector;
+              range1->logical_sector + range1->n_sectors > range2->logical_sector;
 }
 
 static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
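The fix above repairs a copy-paste bug: the second clause compared range2 against itself, which is true for any non-empty range, so every pair of ranges was reported as overlapping. A stand-alone check of the corrected predicate:

    #include <assert.h>
    #include <stdbool.h>

    struct range { unsigned long start, len; };

    /* Half-open intervals [start, start + len) overlap iff each one
     * starts before the other ends. */
    static bool ranges_overlap(const struct range *r1, const struct range *r2)
    {
            return r1->start < r2->start + r2->len &&
                   r1->start + r1->len > r2->start;
    }

    int main(void)
    {
            struct range a = { 0, 8 }, b = { 16, 8 }, c = { 4, 8 };

            assert(!ranges_overlap(&a, &b)); /* disjoint: the buggy test said true */
            assert(ranges_overlap(&a, &c));  /* genuine overlap */
            return 0;
    }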
@@ -959,8 +959,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity
                struct dm_integrity_range *last_range =
                        list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
                struct task_struct *last_range_task;
-               if (!ranges_overlap(range, last_range))
-                       break;
                last_range_task = last_range->task;
                list_del(&last_range->wait_entry);
                if (!add_new_range(ic, last_range, false)) {
@@ -3185,7 +3183,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
                        journal_watermark = val;
                else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
                        sync_msec = val;
-               else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) {
+               else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
                        if (ic->meta_dev) {
                                dm_put_device(ti, ic->meta_dev);
                                ic->meta_dev = NULL;
@@ -3204,17 +3202,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
                                goto bad;
                        }
                        ic->sectors_per_block = val >> SECTOR_SHIFT;
-               } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
+               } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
                        r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
                                            "Invalid internal_hash argument");
                        if (r)
                                goto bad;
-               } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
+               } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
                        r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
                                            "Invalid journal_crypt argument");
                        if (r)
                                goto bad;
-               } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
+               } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
                        r = get_alg_and_key(opt_string, &ic->journal_mac_alg,  &ti->error,
                                            "Invalid journal_mac argument");
                        if (r)
@@ -3616,7 +3614,7 @@ static struct target_type integrity_target = {
        .io_hints               = dm_integrity_io_hints,
 };
 
-int __init dm_integrity_init(void)
+static int __init dm_integrity_init(void)
 {
        int r;
 
@@ -3635,7 +3633,7 @@ int __init dm_integrity_init(void)
        return r;
 }
 
-void dm_integrity_exit(void)
+static void __exit dm_integrity_exit(void)
 {
        dm_unregister_target(&integrity_target);
        kmem_cache_destroy(journal_io_cache);
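The memcmp()-to-strncmp() conversions above matter when the user-supplied option string is shorter than the prefix under test: memcmp() compares a fixed byte count and can read past the argument's terminating NUL, while strncmp() stops at the first NUL of either string. A short illustration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *opt = "meta";   /* argument shorter than the prefix */

            /* strncmp() stops at opt's NUL; a memcmp() over
             * strlen("meta_device:") bytes could read past it. */
            if (strncmp(opt, "meta_device:", strlen("meta_device:")) != 0)
                    printf("no match, and no out-of-bounds read\n");
            return 0;
    }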
index 09773636602d3d86728b127ddb67b2d674b67cae..b66745bd08bbcc2dd1ab349f47c7326199518778 100644 (file)
@@ -222,11 +222,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
        }
 
        if (unlikely(error == BLK_STS_TARGET)) {
-               if (req_op(clone) == REQ_OP_WRITE_SAME &&
-                   !clone->q->limits.max_write_same_sectors)
+               if (req_op(clone) == REQ_OP_DISCARD &&
+                   !clone->q->limits.max_discard_sectors)
+                       disable_discard(tio->md);
+               else if (req_op(clone) == REQ_OP_WRITE_SAME &&
+                        !clone->q->limits.max_write_same_sectors)
                        disable_write_same(tio->md);
-               if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
-                   !clone->q->limits.max_write_zeroes_sectors)
+               else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
+                        !clone->q->limits.max_write_zeroes_sectors)
                        disable_write_zeroes(tio->md);
        }
 
index ba9481f1bf3c04cf64c7ea5e570f2a2bf533759c..cde3b49b2a9107abafd76d190c9fc61209141f7b 100644 (file)
@@ -1844,6 +1844,36 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
        return true;
 }
 
+static int device_requires_stable_pages(struct dm_target *ti,
+                                       struct dm_dev *dev, sector_t start,
+                                       sector_t len, void *data)
+{
+       struct request_queue *q = bdev_get_queue(dev->bdev);
+
+       return q && bdi_cap_stable_pages_required(q->backing_dev_info);
+}
+
+/*
+ * If any underlying device requires stable pages, a table must require
+ * them as well.  Only targets that support iterate_devices are considered:
+ * we don't want error, zero, etc. to require stable pages.
+ */
+static bool dm_table_requires_stable_pages(struct dm_table *t)
+{
+       struct dm_target *ti;
+       unsigned i;
+
+       for (i = 0; i < dm_table_get_num_targets(t); i++) {
+               ti = dm_table_get_target(t, i);
+
+               if (ti->type->iterate_devices &&
+                   ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
+                       return true;
+       }
+
+       return false;
+}
+
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
                               struct queue_limits *limits)
 {
@@ -1896,6 +1926,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 
        dm_table_verify_integrity(t);
 
+       /*
+        * Some devices don't use blk_integrity but still want stable pages
+        * because they do their own checksumming.
+        */
+       if (dm_table_requires_stable_pages(t))
+               q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+       else
+               q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
+
        /*
         * Determine whether or not this queue's I/O timings contribute
         * to the entropy pool. Only request-based targets use this.
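The new helper is an OR-reduction across every device in the table: a single underlying device that needs stable pages forces the whole table to advertise them. Stripped of the device-mapper plumbing, the aggregation looks like this (types are stand-ins):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct dev { bool needs_stable_pages; };

    /* One positive answer is enough, mirroring how
     * dm_table_requires_stable_pages() probes each target's devices
     * through the iterate_devices callback. */
    static bool table_requires_stable_pages(const struct dev *devs, size_t n)
    {
            size_t i;

            for (i = 0; i < n; i++)
                    if (devs[i].needs_stable_pages)
                            return true;
            return false;
    }

    int main(void)
    {
            struct dev devs[] = { { false }, { true }, { false } };

            printf("%d\n", table_requires_stable_pages(devs, 3)); /* 1 */
            return 0;
    }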
index 68d24056d0b1c17d7fa0c271d1d5582d7eb72c89..043f0761e4a0aea8a22a1c6745f3f9bbbc021dfd 100644 (file)
@@ -945,6 +945,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
        }
 }
 
+void disable_discard(struct mapped_device *md)
+{
+       struct queue_limits *limits = dm_get_queue_limits(md);
+
+       /* device doesn't really support DISCARD, disable it */
+       limits->max_discard_sectors = 0;
+       blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
+}
+
 void disable_write_same(struct mapped_device *md)
 {
        struct queue_limits *limits = dm_get_queue_limits(md);
@@ -970,11 +979,14 @@ static void clone_endio(struct bio *bio)
        dm_endio_fn endio = tio->ti->type->end_io;
 
        if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
-               if (bio_op(bio) == REQ_OP_WRITE_SAME &&
-                   !bio->bi_disk->queue->limits.max_write_same_sectors)
+               if (bio_op(bio) == REQ_OP_DISCARD &&
+                   !bio->bi_disk->queue->limits.max_discard_sectors)
+                       disable_discard(md);
+               else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
+                        !bio->bi_disk->queue->limits.max_write_same_sectors)
                        disable_write_same(md);
-               if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
-                   !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+               else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+                        !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
                        disable_write_zeroes(md);
        }
 
@@ -1042,15 +1054,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
                return -EINVAL;
        }
 
-       /*
-        * BIO based queue uses its own splitting. When multipage bvecs
-        * is switched on, size of the incoming bio may be too big to
-        * be handled in some targets, such as crypt.
-        *
-        * When these targets are ready for the big bio, we can remove
-        * the limit.
-        */
-       ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
+       ti->max_io_len = (uint32_t) len;
 
        return 0;
 }
index 3972232b80378fa855513ac9f3b088ff7049c35c..749ec268d957d60a7ecb25e6372a633f644d10ec 100644 (file)
 #define MAX_HOLDERS 4
 #define MAX_STACK 10
 
-typedef unsigned long stack_entries[MAX_STACK];
+struct stack_store {
+       unsigned int    nr_entries;
+       unsigned long   entries[MAX_STACK];
+};
 
 struct block_lock {
        spinlock_t lock;
@@ -44,8 +47,7 @@ struct block_lock {
        struct task_struct *holders[MAX_HOLDERS];
 
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
-       struct stack_trace traces[MAX_HOLDERS];
-       stack_entries entries[MAX_HOLDERS];
+       struct stack_store traces[MAX_HOLDERS];
 #endif
 };
 
@@ -73,7 +75,7 @@ static void __add_holder(struct block_lock *lock, struct task_struct *task)
 {
        unsigned h = __find_holder(lock, NULL);
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
-       struct stack_trace *t;
+       struct stack_store *t;
 #endif
 
        get_task_struct(task);
@@ -81,11 +83,7 @@ static void __add_holder(struct block_lock *lock, struct task_struct *task)
 
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
        t = lock->traces + h;
-       t->nr_entries = 0;
-       t->max_entries = MAX_STACK;
-       t->entries = lock->entries[h];
-       t->skip = 2;
-       save_stack_trace(t);
+       t->nr_entries = stack_trace_save(t->entries, MAX_STACK, 2);
 #endif
 }
 
@@ -106,7 +104,8 @@ static int __check_holder(struct block_lock *lock)
                        DMERR("recursive lock detected in metadata");
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
                        DMERR("previously held here:");
-                       print_stack_trace(lock->traces + i, 4);
+                       stack_trace_print(lock->traces[i].entries,
+                                         lock->traces[i].nr_entries, 4);
 
                        DMERR("subsequent acquisition attempted here:");
                        dump_stack();
index 0ce2d8dfc5f1a19bedbae37f91ce956b1b13da89..26ad6468d13a786552f581e3f45eb444febf0b51 100644 (file)
@@ -1246,7 +1246,7 @@ config MFD_STA2X11
 
 config MFD_SUN6I_PRCM
        bool "Allwinner A31 PRCM controller"
-       depends on ARCH_SUNXI
+       depends on ARCH_SUNXI || COMPILE_TEST
        select MFD_CORE
        help
          Support for the PRCM (Power/Reset/Clock Management) unit available
index 69df27769c2136e817d3baf575269c59902752ac..43ac71691fe477f95eba6293cc6f6b2df810e243 100644 (file)
@@ -53,67 +53,67 @@ static const struct sprd_pmic_data sc2731_data = {
 static const struct mfd_cell sprd_pmic_devs[] = {
        {
                .name = "sc27xx-wdt",
-               .of_compatible = "sprd,sc27xx-wdt",
+               .of_compatible = "sprd,sc2731-wdt",
        }, {
                .name = "sc27xx-rtc",
-               .of_compatible = "sprd,sc27xx-rtc",
+               .of_compatible = "sprd,sc2731-rtc",
        }, {
                .name = "sc27xx-charger",
-               .of_compatible = "sprd,sc27xx-charger",
+               .of_compatible = "sprd,sc2731-charger",
        }, {
                .name = "sc27xx-chg-timer",
-               .of_compatible = "sprd,sc27xx-chg-timer",
+               .of_compatible = "sprd,sc2731-chg-timer",
        }, {
                .name = "sc27xx-fast-chg",
-               .of_compatible = "sprd,sc27xx-fast-chg",
+               .of_compatible = "sprd,sc2731-fast-chg",
        }, {
                .name = "sc27xx-chg-wdt",
-               .of_compatible = "sprd,sc27xx-chg-wdt",
+               .of_compatible = "sprd,sc2731-chg-wdt",
        }, {
                .name = "sc27xx-typec",
-               .of_compatible = "sprd,sc27xx-typec",
+               .of_compatible = "sprd,sc2731-typec",
        }, {
                .name = "sc27xx-flash",
-               .of_compatible = "sprd,sc27xx-flash",
+               .of_compatible = "sprd,sc2731-flash",
        }, {
                .name = "sc27xx-eic",
-               .of_compatible = "sprd,sc27xx-eic",
+               .of_compatible = "sprd,sc2731-eic",
        }, {
                .name = "sc27xx-efuse",
-               .of_compatible = "sprd,sc27xx-efuse",
+               .of_compatible = "sprd,sc2731-efuse",
        }, {
                .name = "sc27xx-thermal",
-               .of_compatible = "sprd,sc27xx-thermal",
+               .of_compatible = "sprd,sc2731-thermal",
        }, {
                .name = "sc27xx-adc",
-               .of_compatible = "sprd,sc27xx-adc",
+               .of_compatible = "sprd,sc2731-adc",
        }, {
                .name = "sc27xx-audio-codec",
-               .of_compatible = "sprd,sc27xx-audio-codec",
+               .of_compatible = "sprd,sc2731-audio-codec",
        }, {
                .name = "sc27xx-regulator",
-               .of_compatible = "sprd,sc27xx-regulator",
+               .of_compatible = "sprd,sc2731-regulator",
        }, {
                .name = "sc27xx-vibrator",
-               .of_compatible = "sprd,sc27xx-vibrator",
+               .of_compatible = "sprd,sc2731-vibrator",
        }, {
                .name = "sc27xx-keypad-led",
-               .of_compatible = "sprd,sc27xx-keypad-led",
+               .of_compatible = "sprd,sc2731-keypad-led",
        }, {
                .name = "sc27xx-bltc",
-               .of_compatible = "sprd,sc27xx-bltc",
+               .of_compatible = "sprd,sc2731-bltc",
        }, {
                .name = "sc27xx-fgu",
-               .of_compatible = "sprd,sc27xx-fgu",
+               .of_compatible = "sprd,sc2731-fgu",
        }, {
                .name = "sc27xx-7sreset",
-               .of_compatible = "sprd,sc27xx-7sreset",
+               .of_compatible = "sprd,sc2731-7sreset",
        }, {
                .name = "sc27xx-poweroff",
-               .of_compatible = "sprd,sc27xx-poweroff",
+               .of_compatible = "sprd,sc2731-poweroff",
        }, {
                .name = "sc27xx-syscon",
-               .of_compatible = "sprd,sc27xx-syscon",
+               .of_compatible = "sprd,sc2731-syscon",
        },
 };
 
index 299016bc46d909b4164708044ae21c9d5d5d14f4..104477b512a296b56e7549f001b63cd1ad9a43ac 100644 (file)
@@ -1245,6 +1245,28 @@ free:
        return status;
 }
 
+static int __maybe_unused twl_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+
+       if (client->irq)
+               disable_irq(client->irq);
+
+       return 0;
+}
+
+static int __maybe_unused twl_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+
+       if (client->irq)
+               enable_irq(client->irq);
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(twl_dev_pm_ops, twl_suspend, twl_resume);
+
 static const struct i2c_device_id twl_ids[] = {
        { "twl4030", TWL4030_VAUX2 },   /* "Triton 2" */
        { "twl5030", 0 },               /* T2 updated */
@@ -1262,6 +1284,7 @@ static const struct i2c_device_id twl_ids[] = {
 /* One Client Driver , 4 Clients */
 static struct i2c_driver twl_driver = {
        .driver.name    = DRIVER_NAME,
+       .driver.pm      = &twl_dev_pm_ops,
        .id_table       = twl_ids,
        .probe          = twl_probe,
        .remove         = twl_remove,
index 39f832d2728899a8575763834d53bb438ae262bd..36d0d5c9cfbad80a3d191fa680d8e23f3ee908c1 100644 (file)
@@ -1184,6 +1184,7 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
        struct fastrpc_session_ctx *sess;
        struct device *dev = &pdev->dev;
        int i, sessions = 0;
+       int rc;
 
        cctx = dev_get_drvdata(dev->parent);
        if (!cctx)
@@ -1213,7 +1214,11 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
        }
        cctx->sesscount++;
        spin_unlock(&cctx->lock);
-       dma_set_mask(dev, DMA_BIT_MASK(32));
+       rc = dma_set_mask(dev, DMA_BIT_MASK(32));
+       if (rc) {
+               dev_err(dev, "32-bit DMA enable failed\n");
+               return rc;
+       }
 
        return 0;
 }
index 3525236ed8d9d702e25fac066926ba1933fe4edc..19c84214a7ea8890543ea8341033ed1ceb89df12 100644 (file)
@@ -179,6 +179,12 @@ static void cs_do_release(struct kref *ref)
 
        /* We also need to update CI for internal queues */
        if (cs->submitted) {
+               int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt);
+
+               WARN_ONCE((cs_cnt < 0),
+                       "hl%d: error in CS active cnt %d\n",
+                       hdev->id, cs_cnt);
+
                hl_int_hw_queue_update_ci(cs);
 
                spin_lock(&hdev->hw_queues_mirror_lock);
index a53c12aff6ad9cebd9be4a2b031b9a93ea6c72b3..974a87789bd8689d1530daa8890bac3b3b32d38c 100644 (file)
@@ -232,6 +232,7 @@ static int vm_show(struct seq_file *s, void *data)
        struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
        enum vm_type_t *vm_type;
        bool once = true;
+       u64 j;
        int i;
 
        if (!dev_entry->hdev->mmu_enable)
@@ -260,7 +261,7 @@ static int vm_show(struct seq_file *s, void *data)
                        } else {
                                phys_pg_pack = hnode->ptr;
                                seq_printf(s,
-                                       "    0x%-14llx      %-10u       %-4u\n",
+                                       "    0x%-14llx      %-10llu       %-4u\n",
                                        hnode->vaddr, phys_pg_pack->total_size,
                                        phys_pg_pack->handle);
                        }
@@ -282,9 +283,9 @@ static int vm_show(struct seq_file *s, void *data)
                                                phys_pg_pack->page_size);
                        seq_puts(s, "   physical address\n");
                        seq_puts(s, "---------------------\n");
-                       for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+                       for (j = 0 ; j < phys_pg_pack->npages ; j++) {
                                seq_printf(s, "    0x%-14llx\n",
-                                               phys_pg_pack->pages[i]);
+                                               phys_pg_pack->pages[j]);
                        }
                }
                spin_unlock(&vm->idr_lock);
index de46aa6ed1542438c5d5952ff77c9cc17dadc5a6..77d51be66c7e84045558fff78eea0a8e9a70439e 100644 (file)
@@ -11,6 +11,8 @@
 #include <linux/sched/signal.h>
 #include <linux/hwmon.h>
 
+#define HL_PLDM_PENDING_RESET_PER_SEC  (HL_PENDING_RESET_PER_SEC * 10)
+
 bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
 {
        if ((hdev->disabled) || (atomic_read(&hdev->in_reset)))
@@ -216,6 +218,7 @@ static int device_early_init(struct hl_device *hdev)
        spin_lock_init(&hdev->hw_queues_mirror_lock);
        atomic_set(&hdev->in_reset, 0);
        atomic_set(&hdev->fd_open_cnt, 0);
+       atomic_set(&hdev->cs_active_cnt, 0);
 
        return 0;
 
@@ -413,6 +416,27 @@ int hl_device_suspend(struct hl_device *hdev)
 
        pci_save_state(hdev->pdev);
 
+       /* Block future CS/VM/JOB completion operations */
+       rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+       if (rc) {
+               dev_err(hdev->dev, "Can't suspend while in reset\n");
+               return -EIO;
+       }
+
+       /* This blocks all other stuff that is not blocked by in_reset */
+       hdev->disabled = true;
+
+       /*
+        * Flush anyone that is inside the critical section of enqueuing
+        * jobs to the H/W
+        */
+       hdev->asic_funcs->hw_queues_lock(hdev);
+       hdev->asic_funcs->hw_queues_unlock(hdev);
+
+       /* Flush processes that are sending messages to the CPU */
+       mutex_lock(&hdev->send_cpu_message_lock);
+       mutex_unlock(&hdev->send_cpu_message_lock);
+
        rc = hdev->asic_funcs->suspend(hdev);
        if (rc)
                dev_err(hdev->dev,
@@ -440,21 +464,38 @@ int hl_device_resume(struct hl_device *hdev)
 
        pci_set_power_state(hdev->pdev, PCI_D0);
        pci_restore_state(hdev->pdev);
-       rc = pci_enable_device(hdev->pdev);
+       rc = pci_enable_device_mem(hdev->pdev);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to enable PCI device in resume\n");
                return rc;
        }
 
+       pci_set_master(hdev->pdev);
+
        rc = hdev->asic_funcs->resume(hdev);
        if (rc) {
-               dev_err(hdev->dev,
-                       "Failed to enable PCI access from device CPU\n");
-               return rc;
+               dev_err(hdev->dev, "Failed to resume device after suspend\n");
+               goto disable_device;
+       }
+
+
+       hdev->disabled = false;
+       atomic_set(&hdev->in_reset, 0);
+
+       rc = hl_device_reset(hdev, true, false);
+       if (rc) {
+               dev_err(hdev->dev, "Failed to reset device during resume\n");
+               goto disable_device;
        }
 
        return 0;
+
+disable_device:
+       pci_clear_master(hdev->pdev);
+       pci_disable_device(hdev->pdev);
+
+       return rc;
 }
 
 static void hl_device_hard_reset_pending(struct work_struct *work)
@@ -462,9 +503,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
        struct hl_device_reset_work *device_reset_work =
                container_of(work, struct hl_device_reset_work, reset_work);
        struct hl_device *hdev = device_reset_work->hdev;
-       u16 pending_cnt = HL_PENDING_RESET_PER_SEC;
+       u16 pending_total, pending_cnt;
        struct task_struct *task = NULL;
 
+       if (hdev->pldm)
+               pending_total = HL_PLDM_PENDING_RESET_PER_SEC;
+       else
+               pending_total = HL_PENDING_RESET_PER_SEC;
+
+       pending_cnt = pending_total;
+
        /* Flush all processes that are inside hl_open */
        mutex_lock(&hdev->fd_open_cnt_lock);
 
@@ -489,6 +537,19 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
                }
        }
 
+       pending_cnt = pending_total;
+
+       while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
+
+               pending_cnt--;
+
+               ssleep(1);
+       }
+
+       if (atomic_read(&hdev->fd_open_cnt))
+               dev_crit(hdev->dev,
+                       "Going to hard reset with open user contexts\n");
+
        mutex_unlock(&hdev->fd_open_cnt_lock);
 
        hl_device_reset(hdev, true, true);
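The empty lock/unlock pairs in hl_device_suspend() above are a flush idiom: once the gate (disabled, or in_reset) is set, taking and releasing the lock guarantees that every thread which entered the critical section before the gate was set has left it. A user-space rendition with POSIX threads (a sketch of the idiom, not the driver's code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_bool disabled;

    static void quiesce(void)
    {
            atomic_store(&disabled, true);  /* new entrants now back off */
            pthread_mutex_lock(&hw_lock);   /* waits out anyone already inside */
            pthread_mutex_unlock(&hw_lock);
    }

    int main(void)
    {
            quiesce();
            printf("quiesced: %d\n", atomic_load(&disabled));
            return 0;
    }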
index 238dd57c541bdf1e632f8ff008f69bafc3e5e59a..3c509e19d69dc430a668825ab935927f8441b468 100644 (file)
@@ -1201,15 +1201,6 @@ static int goya_stop_external_queues(struct hl_device *hdev)
        return retval;
 }
 
-static void goya_resume_external_queues(struct hl_device *hdev)
-{
-       WREG32(mmDMA_QM_0_GLBL_CFG1, 0);
-       WREG32(mmDMA_QM_1_GLBL_CFG1, 0);
-       WREG32(mmDMA_QM_2_GLBL_CFG1, 0);
-       WREG32(mmDMA_QM_3_GLBL_CFG1, 0);
-       WREG32(mmDMA_QM_4_GLBL_CFG1, 0);
-}
-
 /*
  * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
  *
@@ -1697,12 +1688,11 @@ static void goya_init_golden_registers(struct hl_device *hdev)
 
        /*
         * Workaround for H2 #HW-23 bug
-        * Set DMA max outstanding read requests to 240 on DMA CH 1. Set it
-        * to 16 on KMD DMA
-        * We need to limit only these DMAs because the user can only read
+        * Set DMA max outstanding read requests to 240 on DMA CH 1.
+        * This limitation is still large enough to not affect Gen4 bandwidth.
+        * We need to only limit that DMA channel because the user can only read
         * from Host using DMA CH 1
         */
-       WREG32(mmDMA_CH_0_CFG0, 0x0fff0010);
        WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
 
        goya->hw_cap_initialized |= HW_CAP_GOLDEN;
@@ -2178,36 +2168,6 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
        return retval;
 }
 
-static void goya_resume_internal_queues(struct hl_device *hdev)
-{
-       WREG32(mmMME_QM_GLBL_CFG1, 0);
-       WREG32(mmMME_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC0_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC1_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC2_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC3_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC4_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC5_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC6_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC7_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0);
-}
-
 static void goya_dma_stall(struct hl_device *hdev)
 {
        WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
@@ -2905,20 +2865,6 @@ int goya_suspend(struct hl_device *hdev)
 {
        int rc;
 
-       rc = goya_stop_internal_queues(hdev);
-
-       if (rc) {
-               dev_err(hdev->dev, "failed to stop internal queues\n");
-               return rc;
-       }
-
-       rc = goya_stop_external_queues(hdev);
-
-       if (rc) {
-               dev_err(hdev->dev, "failed to stop external queues\n");
-               return rc;
-       }
-
        rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
        if (rc)
                dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
@@ -2928,15 +2874,7 @@ int goya_suspend(struct hl_device *hdev)
 
 int goya_resume(struct hl_device *hdev)
 {
-       int rc;
-
-       goya_resume_external_queues(hdev);
-       goya_resume_internal_queues(hdev);
-
-       rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
-       if (rc)
-               dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
-       return rc;
+       return goya_init_iatu(hdev);
 }
 
 static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
@@ -3070,7 +3008,7 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
 
        *dma_handle = hdev->asic_prop.sram_base_address;
 
-       base = hdev->pcie_bar[SRAM_CFG_BAR_ID];
+       base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
 
        switch (queue_id) {
        case GOYA_QUEUE_ID_MME:
@@ -3754,7 +3692,7 @@ static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
         * WA for HW-23.
         * We can't allow user to read from Host using QMANs other than 1.
         */
-       if (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1 &&
+       if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 &&
                hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
                                le32_to_cpu(user_dma_pkt->tsize),
                                hdev->asic_prop.va_space_host_start_address,
index a7c95e9f9b9a8808efa70651e66c34625ac82d0a..a8ee52c880cd800651681b866048126b2e9fc478 100644 (file)
@@ -793,11 +793,11 @@ struct hl_vm_hash_node {
  * struct hl_vm_phys_pg_pack - physical page pack.
  * @vm_type: describes the type of the virtual area descriptor.
  * @pages: the physical page array.
+ * @npages: num physical pages in the pack.
+ * @total_size: total size of all the pages in this list.
  * @mapping_cnt: number of shared mappings.
  * @asid: the context related to this list.
- * @npages: num physical pages in the pack.
  * @page_size: size of each page in the pack.
- * @total_size: total size of all the pages in this list.
  * @flags: HL_MEM_* flags related to this list.
  * @handle: the provided handle related to this list.
  * @offset: offset from the first page.
@@ -807,11 +807,11 @@ struct hl_vm_hash_node {
 struct hl_vm_phys_pg_pack {
        enum vm_type_t          vm_type; /* must be first */
        u64                     *pages;
+       u64                     npages;
+       u64                     total_size;
        atomic_t                mapping_cnt;
        u32                     asid;
-       u32                     npages;
        u32                     page_size;
-       u32                     total_size;
        u32                     flags;
        u32                     handle;
        u32                     offset;
@@ -1056,13 +1056,15 @@ struct hl_device_reset_work {
  * @cb_pool_lock: protects the CB pool.
  * @user_ctx: current user context executing.
  * @dram_used_mem: current DRAM memory consumption.
- * @in_reset: is device in reset flow.
- * @curr_pll_profile: current PLL profile.
- * @fd_open_cnt: number of open user processes.
  * @timeout_jiffies: device CS timeout value.
  * @max_power: the max power of the device, as configured by the sysadmin. This
  *             value is saved so in case of hard-reset, KMD will restore this
  *             value and update the F/W after the re-initialization
+ * @in_reset: is device in reset flow.
+ * @curr_pll_profile: current PLL profile.
+ * @fd_open_cnt: number of open user processes.
+ * @cs_active_cnt: number of active command submissions on this device (active
+ *                 means already in H/W queues)
  * @major: habanalabs KMD major.
  * @high_pll: high PLL profile frequency.
  * @soft_reset_cnt: number of soft reset since KMD loading.
@@ -1128,11 +1130,12 @@ struct hl_device {
        struct hl_ctx                   *user_ctx;
 
        atomic64_t                      dram_used_mem;
+       u64                             timeout_jiffies;
+       u64                             max_power;
        atomic_t                        in_reset;
        atomic_t                        curr_pll_profile;
        atomic_t                        fd_open_cnt;
-       u64                             timeout_jiffies;
-       u64                             max_power;
+       atomic_t                        cs_active_cnt;
        u32                             major;
        u32                             high_pll;
        u32                             soft_reset_cnt;
index 67bece26417cbe930fa018abdb33c88ba8618b23..ef3bb695136025971c76b916a97dde8a4b36905b 100644 (file)
@@ -370,12 +370,13 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
                spin_unlock(&hdev->hw_queues_mirror_lock);
        }
 
-       list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) {
+       atomic_inc(&hdev->cs_active_cnt);
+
+       list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
                if (job->ext_queue)
                        ext_hw_queue_schedule_job(job);
                else
                        int_hw_queue_schedule_job(job);
-       }
 
        cs->submitted = true;
 
index 3a12fd1a5274479e89406947991fd709203e6726..ce1fda40a8b8112572b9a26db139c8aa6de76f8e 100644 (file)
@@ -56,9 +56,9 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
        struct hl_device *hdev = ctx->hdev;
        struct hl_vm *vm = &hdev->vm;
        struct hl_vm_phys_pg_pack *phys_pg_pack;
-       u64 paddr = 0;
-       u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift;
-       int handle, rc, i;
+       u64 paddr = 0, total_size, num_pgs, i;
+       u32 num_curr_pgs, page_size, page_shift;
+       int handle, rc;
        bool contiguous;
 
        num_curr_pgs = 0;
@@ -73,7 +73,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
                paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
                if (!paddr) {
                        dev_err(hdev->dev,
-                               "failed to allocate %u huge contiguous pages\n",
+                               "failed to allocate %llu huge contiguous pages\n",
                                num_pgs);
                        return -ENOMEM;
                }
@@ -93,7 +93,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
        phys_pg_pack->flags = args->flags;
        phys_pg_pack->contiguous = contiguous;
 
-       phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL);
+       phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
        if (!phys_pg_pack->pages) {
                rc = -ENOMEM;
                goto pages_arr_err;
@@ -148,7 +148,7 @@ page_err:
                        gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
                                        page_size);
 
-       kfree(phys_pg_pack->pages);
+       kvfree(phys_pg_pack->pages);
 pages_arr_err:
        kfree(phys_pg_pack);
 pages_pack_err:
@@ -267,7 +267,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
                struct hl_vm_phys_pg_pack *phys_pg_pack)
 {
        struct hl_vm *vm = &hdev->vm;
-       int i;
+       u64 i;
 
        if (!phys_pg_pack->created_from_userptr) {
                if (phys_pg_pack->contiguous) {
@@ -288,7 +288,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
                }
        }
 
-       kfree(phys_pg_pack->pages);
+       kvfree(phys_pg_pack->pages);
        kfree(phys_pg_pack);
 }
 
@@ -519,7 +519,7 @@ static inline int add_va_block(struct hl_device *hdev,
  * - Return the start address of the virtual block
  */
 static u64 get_va_block(struct hl_device *hdev,
-               struct hl_va_range *va_range, u32 size, u64 hint_addr,
+               struct hl_va_range *va_range, u64 size, u64 hint_addr,
                bool is_userptr)
 {
        struct hl_vm_va_block *va_block, *new_va_block = NULL;
@@ -577,7 +577,8 @@ static u64 get_va_block(struct hl_device *hdev,
        }
 
        if (!new_va_block) {
-               dev_err(hdev->dev, "no available va block for size %u\n", size);
+               dev_err(hdev->dev, "no available va block for size %llu\n",
+                               size);
                goto out;
        }
 
@@ -648,8 +649,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
        struct hl_vm_phys_pg_pack *phys_pg_pack;
        struct scatterlist *sg;
        dma_addr_t dma_addr;
-       u64 page_mask;
-       u32 npages, total_npages, page_size = PAGE_SIZE;
+       u64 page_mask, total_npages;
+       u32 npages, page_size = PAGE_SIZE;
        bool first = true, is_huge_page_opt = true;
        int rc, i, j;
 
@@ -691,7 +692,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
 
        page_mask = ~(((u64) page_size) - 1);
 
-       phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL);
+       phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
+                                               GFP_KERNEL);
        if (!phys_pg_pack->pages) {
                rc = -ENOMEM;
                goto page_pack_arr_mem_err;
@@ -750,9 +752,9 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
                struct hl_vm_phys_pg_pack *phys_pg_pack)
 {
        struct hl_device *hdev = ctx->hdev;
-       u64 next_vaddr = vaddr, paddr;
+       u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
        u32 page_size = phys_pg_pack->page_size;
-       int i, rc = 0, mapped_pg_cnt = 0;
+       int rc = 0;
 
        for (i = 0 ; i < phys_pg_pack->npages ; i++) {
                paddr = phys_pg_pack->pages[i];
@@ -764,7 +766,7 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
                rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
                if (rc) {
                        dev_err(hdev->dev,
-                               "map failed for handle %u, npages: %d, mapped: %d",
+                               "map failed for handle %u, npages: %llu, mapped: %llu",
                                phys_pg_pack->handle, phys_pg_pack->npages,
                                mapped_pg_cnt);
                        goto err;
@@ -985,10 +987,10 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
        struct hl_vm_hash_node *hnode = NULL;
        struct hl_userptr *userptr = NULL;
        enum vm_type_t *vm_type;
-       u64 next_vaddr;
+       u64 next_vaddr, i;
        u32 page_size;
        bool is_userptr;
-       int i, rc;
+       int rc;
 
        /* protect from double entrance */
        mutex_lock(&ctx->mem_hash_lock);
index 2f2e99cb27439433bd4527350b2347a6856cab5d..3a5a2cec83051b08c1b838372aaf29c0f1b99e13 100644 (file)
@@ -832,7 +832,7 @@ err:
 int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
 {
        struct hl_device *hdev = ctx->hdev;
-       u64 real_virt_addr;
+       u64 real_virt_addr, real_phys_addr;
        u32 real_page_size, npages;
        int i, rc, mapped_cnt = 0;
 
@@ -857,14 +857,16 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
 
        npages = page_size / real_page_size;
        real_virt_addr = virt_addr;
+       real_phys_addr = phys_addr;
 
        for (i = 0 ; i < npages ; i++) {
-               rc = _hl_mmu_map(ctx, real_virt_addr, phys_addr,
+               rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
                                real_page_size);
                if (rc)
                        goto err;
 
                real_virt_addr += real_page_size;
+               real_phys_addr += real_page_size;
                mapped_cnt++;
        }
 
index c712b7deb3a9d88e62416005d2444e908a4fb664..7c8f203f9a24d38bbd1c2b870644e511a1db76d3 100644 (file)
@@ -48,7 +48,6 @@ struct alcor_sdmmc_host {
        struct mmc_command *cmd;
        struct mmc_data *data;
        unsigned int dma_on:1;
-       unsigned int early_data:1;
 
        struct mutex cmd_mutex;
 
@@ -144,8 +143,7 @@ static void alcor_data_set_dma(struct alcor_sdmmc_host *host)
        host->sg_count--;
 }
 
-static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host,
-                                       bool early)
+static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host)
 {
        struct alcor_pci_priv *priv = host->alcor_pci;
        struct mmc_data *data = host->data;
@@ -155,13 +153,6 @@ static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host,
                ctrl |= AU6601_DATA_WRITE;
 
        if (data->host_cookie == COOKIE_MAPPED) {
-               if (host->early_data) {
-                       host->early_data = false;
-                       return;
-               }
-
-               host->early_data = early;
-
                alcor_data_set_dma(host);
                ctrl |= AU6601_DATA_DMA_MODE;
                host->dma_on = 1;
@@ -231,6 +222,7 @@ static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host)
 static void alcor_prepare_data(struct alcor_sdmmc_host *host,
                               struct mmc_command *cmd)
 {
+       struct alcor_pci_priv *priv = host->alcor_pci;
        struct mmc_data *data = cmd->data;
 
        if (!data)
@@ -248,7 +240,7 @@ static void alcor_prepare_data(struct alcor_sdmmc_host *host,
        if (data->host_cookie != COOKIE_MAPPED)
                alcor_prepare_sg_miter(host);
 
-       alcor_trigger_data_transfer(host, true);
+       alcor_write8(priv, 0, AU6601_DATA_XFER_CTRL);
 }
 
 static void alcor_send_cmd(struct alcor_sdmmc_host *host,
@@ -435,7 +427,7 @@ static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
        if (!host->data)
                return false;
 
-       alcor_trigger_data_transfer(host, false);
+       alcor_trigger_data_transfer(host);
        host->cmd = NULL;
        return true;
 }
@@ -456,7 +448,7 @@ static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
        if (!host->data)
                alcor_request_complete(host, 1);
        else
-               alcor_trigger_data_transfer(host, false);
+               alcor_trigger_data_transfer(host);
        host->cmd = NULL;
 }
 
@@ -487,15 +479,9 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
                break;
        case AU6601_INT_READ_BUF_RDY:
                alcor_trf_block_pio(host, true);
-               if (!host->blocks)
-                       break;
-               alcor_trigger_data_transfer(host, false);
                return 1;
        case AU6601_INT_WRITE_BUF_RDY:
                alcor_trf_block_pio(host, false);
-               if (!host->blocks)
-                       break;
-               alcor_trigger_data_transfer(host, false);
                return 1;
        case AU6601_INT_DMA_END:
                if (!host->sg_count)
@@ -508,8 +494,14 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
                break;
        }
 
-       if (intmask & AU6601_INT_DATA_END)
-               return 0;
+       if (intmask & AU6601_INT_DATA_END) {
+               if (!host->dma_on && host->blocks) {
+                       alcor_trigger_data_transfer(host);
+                       return 1;
+               } else {
+                       return 0;
+               }
+       }
 
        return 1;
 }
@@ -1044,14 +1036,27 @@ static void alcor_init_mmc(struct alcor_sdmmc_host *host)
        mmc->caps2 = MMC_CAP2_NO_SDIO;
        mmc->ops = &alcor_sdc_ops;
 
-       /* Hardware cannot do scatter lists */
+       /* The hardware does DMA data transfer of 4096 bytes to/from a single
+        * buffer address. Scatterlists are not supported, but upon DMA
+        * completion (signalled via IRQ), the original vendor driver
+        * then immediately sets up another DMA transfer of the next 4096
+        * bytes.
+        *
+        * This means that we need to handle the I/O in 4096-byte chunks.
+        * Lacking a way to limit the sglist entries to 4096 bytes, we instead
+        * impose that only one segment is provided, with maximum size 4096,
+        * which also happens to be the minimum size. This means that the
+        * single-entry sglist handled by this driver can be handed directly
+        * to the hardware, nice and simple.
+        *
+        * Unfortunately though, that means we only do 4096 bytes of I/O per
+        * MMC command. A future improvement would be to make the driver
+        * accept sg lists and entries of any size, and simply iterate
+        * through them 4096 bytes at a time.
+        */
        mmc->max_segs = AU6601_MAX_DMA_SEGMENTS;
        mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE;
-
-       mmc->max_blk_size = mmc->max_seg_size;
-       mmc->max_blk_count = mmc->max_segs;
-
-       mmc->max_req_size = mmc->max_seg_size * mmc->max_segs;
+       mmc->max_req_size = mmc->max_seg_size;
 }
 
 static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
index 49e0daf2ef5e1a99cf13eb1d7b46d71f0195a9c5..f37003df1e016f0b3b9cec2368ae23a3b7482dd2 100644 (file)
@@ -1117,7 +1117,7 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
 {
 }
 #endif
-static void __init init_mmcsd_host(struct mmc_davinci_host *host)
+static void init_mmcsd_host(struct mmc_davinci_host *host)
 {
 
        mmc_davinci_reset_ctrl(host, 1);
index d54612257b068441ae3ffe3aaa9b7d7f85a69182..45f7b9b53d48267f448f4d19c357391e9621a90b 100644 (file)
@@ -290,11 +290,8 @@ static void mxcmci_swap_buffers(struct mmc_data *data)
        struct scatterlist *sg;
        int i;
 
-       for_each_sg(data->sg, sg, data->sg_len, i) {
-               void *buf = kmap_atomic(sg_page(sg) + sg->offset);
-               buffer_swap32(buf, sg->length);
-               kunmap_atomic(buf);
-       }
+       for_each_sg(data->sg, sg, data->sg_len, i)
+               buffer_swap32(sg_virt(sg), sg->length);
 }
 #else
 static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
@@ -611,7 +608,6 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
 {
        struct mmc_data *data = host->req->data;
        struct scatterlist *sg;
-       void *buf;
        int stat, i;
 
        host->data = data;
@@ -619,18 +615,14 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
 
        if (data->flags & MMC_DATA_READ) {
                for_each_sg(data->sg, sg, data->sg_len, i) {
-                       buf = kmap_atomic(sg_page(sg) + sg->offset);
-                       stat = mxcmci_pull(host, buf, sg->length);
-                       kunmap(buf);
+                       stat = mxcmci_pull(host, sg_virt(sg), sg->length);
                        if (stat)
                                return stat;
                        host->datasize += sg->length;
                }
        } else {
                for_each_sg(data->sg, sg, data->sg_len, i) {
-                       buf = kmap_atomic(sg_page(sg) + sg->offset);
-                       stat = mxcmci_push(host, buf, sg->length);
-                       kunmap(buf);
+                       stat = mxcmci_push(host, sg_virt(sg), sg->length);
                        if (stat)
                                return stat;
                        host->datasize += sg->length;
index c907bf502a123b5b588d8a70e3446fca1da20a66..c1d3f0e3892131a46192a68e12807d39b1a36c69 100644 (file)
@@ -162,7 +162,7 @@ static void pxamci_dma_irq(void *param);
 static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
 {
        struct dma_async_tx_descriptor *tx;
-       enum dma_data_direction direction;
+       enum dma_transfer_direction direction;
        struct dma_slave_config config;
        struct dma_chan *chan;
        unsigned int nob = data->blocks;
index 71e13844df6c0deaa1a498140e8f553a04ad3148..8742e27e4e8bca8acdb73c37e334cc7f22b2d01f 100644 (file)
@@ -641,6 +641,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
        struct renesas_sdhi *priv;
        struct resource *res;
        int irq, ret, i;
+       u16 ver;
 
        of_data = of_device_get_match_data(&pdev->dev);
 
@@ -773,12 +774,17 @@ int renesas_sdhi_probe(struct platform_device *pdev,
        if (ret)
                goto efree;
 
+       ver = sd_ctrl_read16(host, CTL_VERSION);
+       /* GEN2_SDR104 is first known SDHI to use 32bit block count */
+       if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX)
+               mmc_data->max_blk_count = U16_MAX;
+
        ret = tmio_mmc_host_probe(host);
        if (ret < 0)
                goto edisclk;
 
        /* One Gen2 SDHI incarnation does NOT have a CBSY bit */
-       if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN2_SDR50)
+       if (ver == SDHI_VER_GEN2_SDR50)
                mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY;
 
        /* Enable tuning iff we have an SCC and a supported mode */
index b1a66ca3821a51f97be942ee9596897008e64215..9f20fff9781b0791ea36c1bd3cd657dd1b50e8c6 100644 (file)
@@ -797,6 +797,43 @@ void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
        sdhci_reset(host, mask);
 }
 
+#define CMD_ERR_MASK (SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX |\
+                     SDHCI_INT_TIMEOUT)
+#define CMD_MASK (CMD_ERR_MASK | SDHCI_INT_RESPONSE)
+
+static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask)
+{
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+       if (omap_host->is_tuning && host->cmd && !host->data_early &&
+           (intmask & CMD_ERR_MASK)) {
+
+               /*
+                * Since we are not resetting data lines during tuning
+                * operation, data error or data complete interrupts
+                * might still arrive. Mark this request as a failure
+                * but still wait for the data interrupt
+                */
+               if (intmask & SDHCI_INT_TIMEOUT)
+                       host->cmd->error = -ETIMEDOUT;
+               else
+                       host->cmd->error = -EILSEQ;
+
+               host->cmd = NULL;
+
+               /*
+                * Sometimes command error interrupts and command complete
+                * interrupt will arrive together. Clear all command related
+                * interrupts here.
+                */
+               sdhci_writel(host, intmask & CMD_MASK, SDHCI_INT_STATUS);
+               intmask &= ~CMD_MASK;
+       }
+
+       return intmask;
+}
+
 static struct sdhci_ops sdhci_omap_ops = {
        .set_clock = sdhci_omap_set_clock,
        .set_power = sdhci_omap_set_power,
@@ -807,6 +844,7 @@ static struct sdhci_ops sdhci_omap_ops = {
        .platform_send_init_74_clocks = sdhci_omap_init_74_clocks,
        .reset = sdhci_omap_reset,
        .set_uhs_signaling = sdhci_omap_set_uhs_signaling,
+       .irq = sdhci_omap_irq,
 };
 
 static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
@@ -1056,6 +1094,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
                        mmc->f_max = 48000000;
        }
 
+       if (!mmc_can_gpio_ro(mmc))
+               mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
+
        pltfm_host->clk = devm_clk_get(dev, "fck");
        if (IS_ERR(pltfm_host->clk)) {
                ret = PTR_ERR(pltfm_host->clk);
index 72428b6bfc474ba6d757b94d79ff62804cc7c8ec..7b7286b4d81ef660d22a9ca93e5f0f2870ea5666 100644 (file)
@@ -1876,7 +1876,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
                        continue;
                }
 
-               if (time_after(jiffies, timeo) && !chip_ready(map, adr))
+               /*
+                * Check "time_after" together with "!chip_good" before the
+                * standalone "chip_good" check, so that a scheduling delay
+                * past the timeout cannot turn a completed write into a
+                * spurious failure.
+                */
+               if (time_after(jiffies, timeo) && !chip_good(map, adr, datum))
                        break;
 
                if (chip_good(map, adr, datum)) {
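The reordered timeout check above is a common polling idiom: once the
deadline has passed, sample the completion condition one last time before
declaring failure, because the scheduler may have parked the task beyond
"timeo" even though the chip finished the write in time. A hedged sketch of
the resulting loop shape (simplified; chip_good(), map, adr, datum, timeo
and ret are the names used in the surrounding function):

        for (;;) {
                /* Deadline passed: re-sample completion before failing, so
                 * a scheduling delay cannot turn a finished write into a
                 * spurious timeout.
                 */
                if (time_after(jiffies, timeo) && !chip_good(map, adr, datum)) {
                        ret = -EIO;
                        break;
                }
                if (chip_good(map, adr, datum))
                        break;          /* write completed */
                cond_resched();         /* may sleep well past timeo */
        }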
index f38e5c1b87e474557a1bc0392f6a7ea3b51638cf..d984538980e28defc021c597029f6add135dad13 100644 (file)
@@ -722,12 +722,6 @@ static void marvell_nfc_select_target(struct nand_chip *chip,
        struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
        u32 ndcr_generic;
 
-       if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
-               return;
-
-       writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
-       writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
-
        /*
         * Reset the NDCR register to a clean state for this particular chip,
         * also clear ND_RUN bit.
@@ -739,6 +733,12 @@ static void marvell_nfc_select_target(struct nand_chip *chip,
        /* Also reset the interrupt status register */
        marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
 
+       if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
+               return;
+
+       writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
+       writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
+
        nfc->selected_chip = chip;
        marvell_nand->selected_die = die_nr;
 }
index 5e4ca082cfcdb29845326adf3341d1dd3b461811..7a96d168efc41dce1510fbac29522ce06851a8db 100644 (file)
@@ -216,8 +216,8 @@ config GENEVE
 
 config GTP
        tristate "GPRS Tunneling Protocol datapath (GTP-U)"
-       depends on INET && NET_UDP_TUNNEL
-       select NET_IP_TUNNEL
+       depends on INET
+       select NET_UDP_TUNNEL
        ---help---
          This allows one to create gtp virtual interfaces that provide
          the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol
index b59708c35fafe87a926f4b47c81d89ba56f695e4..ee610721098e628d01ad988b1a9cad40aeef6f70 100644 (file)
@@ -3213,8 +3213,12 @@ static int bond_netdev_event(struct notifier_block *this,
                return NOTIFY_DONE;
 
        if (event_dev->flags & IFF_MASTER) {
+               int ret;
+
                netdev_dbg(event_dev, "IFF_MASTER\n");
-               return bond_master_netdev_event(event, event_dev);
+               ret = bond_master_netdev_event(event, event_dev);
+               if (ret != NOTIFY_DONE)
+                       return ret;
        }
 
        if (event_dev->flags & IFF_SLAVE) {
index 2f120b2ffef0cfd7d97f6a901f9552fcc58288df..4985268e227330045e1cc0a6f3dadcb81e4c2830 100644 (file)
@@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count);
 
 static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
 {
-       return sprintf(buf, "%pM\n", slave->perm_hwaddr);
+       return sprintf(buf, "%*phC\n",
+                      slave->dev->addr_len,
+                      slave->perm_hwaddr);
 }
 static SLAVE_ATTR_RO(perm_hwaddr);
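The switch from %pM to %*phC above matters for bonding over link layers
whose hardware addresses are longer than Ethernet's six bytes (e.g.
InfiniBand, where addr_len is 20): %pM always prints exactly ETH_ALEN
bytes, while the kernel's %*ph family takes an explicit length, with the
'C' variant rendering the bytes as colon-separated hex. A small usage
sketch (the addr buffer is illustrative):

        u8 addr[MAX_ADDR_LEN];                  /* illustrative buffer */
        int len = slave->dev->addr_len;         /* 6 for Ethernet, 20 for IPoIB */

        sprintf(buf, "%*phC\n", len, addr);     /* e.g. "aa:bb:cc:dd:ee:ff" */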
 
index e6234d20978780ea0ae2b3847cf059619b83004d..4212bc4a5f31ad8c284822567a08190ca3ef536e 100644 (file)
@@ -886,6 +886,9 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
             fs->m_ext.data[1]))
                return -EINVAL;
 
+       if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES)
+               return -EINVAL;
+
        if (fs->location != RX_CLS_LOC_ANY &&
            test_bit(fs->location, priv->cfp.used))
                return -EBUSY;
@@ -974,6 +977,9 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
        struct cfp_rule *rule;
        int ret;
 
+       if (loc >= CFP_NUM_RULES)
+               return -EINVAL;
+
        /* Refuse deleting unused rules, and those that are not unique since
         * that could leave IPv6 rules with one of the chained rules in the
         * table.
index dce84a2a65c71eeec36d10fa9ceb6df0a487866a..c44b2822e4dd064e2cba3a9ca1b0af457893e47e 100644 (file)
@@ -427,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                return 0;
 
        lane = mv88e6390x_serdes_get_lane(chip, port);
-       if (lane < 0)
+       if (lane < 0 && lane != -ENODEV)
                return lane;
 
-       if (chip->ports[port].serdes_irq) {
-               err = mv88e6390_serdes_irq_disable(chip, port, lane);
+       if (lane >= 0) {
+               if (chip->ports[port].serdes_irq) {
+                       err = mv88e6390_serdes_irq_disable(chip, port, lane);
+                       if (err)
+                               return err;
+               }
+
+               err = mv88e6390x_serdes_power(chip, port, false);
                if (err)
                        return err;
        }
 
-       err = mv88e6390x_serdes_power(chip, port, false);
-       if (err)
-               return err;
+       chip->ports[port].cmode = 0;
 
        if (cmode) {
                err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
@@ -452,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                if (err)
                        return err;
 
+               chip->ports[port].cmode = cmode;
+
+               lane = mv88e6390x_serdes_get_lane(chip, port);
+               if (lane < 0)
+                       return lane;
+
                err = mv88e6390x_serdes_power(chip, port, true);
                if (err)
                        return err;
@@ -463,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                }
        }
 
-       chip->ports[port].cmode = cmode;
-
        return 0;
 }
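A note on the reordering above: the cached cmode is cleared while the
SERDES is powered down, and the lane is looked up a second time only after
the new cmode has been written, because the port-to-lane mapping on these
switches depends on the currently programmed cmode. The entry check also
tolerates -ENODEV from mv88e6390x_serdes_get_lane(), which merely means
the port has no SERDES lane in its current mode and there is nothing to
power down.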
 
index 576b37d12a63ca4ea5064cd568194ec25fa22ee5..c4fa400efdcc82643dcd4d2c762ed8079305adf7 100644 (file)
@@ -481,6 +481,155 @@ qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
                qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
 }
 
+static u32
+qca8k_port_to_phy(int port)
+{
+       /* From Andrew Lunn:
+        * Port 0 has no internal PHY.
+        * Port 1 has an internal PHY at MDIO address 0.
+        * Port 2 has an internal PHY at MDIO address 1.
+        * ...
+        * Port 5 has an internal PHY at MDIO address 4.
+        * Port 6 has no internal PHY.
+        */
+
+       return port - 1;
+}
+
+static int
+qca8k_mdio_write(struct qca8k_priv *priv, int port, u32 regnum, u16 data)
+{
+       u32 phy, val;
+
+       if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+               return -EINVAL;
+
+       /* The caller is responsible for not passing bad ports, but we
+        * still want to make out-of-range accesses impossible.
+        */
+       phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+       val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+             QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+             QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
+             QCA8K_MDIO_MASTER_DATA(data);
+
+       qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);
+
+       return qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
+               QCA8K_MDIO_MASTER_BUSY);
+}
+
+static int
+qca8k_mdio_read(struct qca8k_priv *priv, int port, u32 regnum)
+{
+       u32 phy, val;
+
+       if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+               return -EINVAL;
+
+       /* The caller is responsible for not passing bad ports, but we
+        * still want to make out-of-range accesses impossible.
+        */
+       phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+       val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+             QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+             QCA8K_MDIO_MASTER_REG_ADDR(regnum);
+
+       qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);
+
+       if (qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
+                           QCA8K_MDIO_MASTER_BUSY))
+               return -ETIMEDOUT;
+
+       val = (qca8k_read(priv, QCA8K_MDIO_MASTER_CTRL) &
+               QCA8K_MDIO_MASTER_DATA_MASK);
+
+       return val;
+}
+
+static int
+qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
+{
+       struct qca8k_priv *priv = ds->priv;
+
+       return qca8k_mdio_write(priv, port, regnum, data);
+}
+
+static int
+qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+       struct qca8k_priv *priv = ds->priv;
+       int ret;
+
+       ret = qca8k_mdio_read(priv, port, regnum);
+
+       if (ret < 0)
+               return 0xffff;
+
+       return ret;
+}
+
+static int
+qca8k_setup_mdio_bus(struct qca8k_priv *priv)
+{
+       u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
+       struct device_node *ports, *port;
+       int err;
+
+       ports = of_get_child_by_name(priv->dev->of_node, "ports");
+       if (!ports)
+               return -EINVAL;
+
+       for_each_available_child_of_node(ports, port) {
+               err = of_property_read_u32(port, "reg", &reg);
+               if (err)
+                       return err;
+
+               if (!dsa_is_user_port(priv->ds, reg))
+                       continue;
+
+               if (of_property_read_bool(port, "phy-handle"))
+                       external_mdio_mask |= BIT(reg);
+               else
+                       internal_mdio_mask |= BIT(reg);
+       }
+
+       if (!external_mdio_mask && !internal_mdio_mask) {
+               dev_err(priv->dev, "no PHYs are defined.\n");
+               return -EINVAL;
+       }
+
+       /* The QCA8K_MDIO_MASTER_EN bit, which grants access to PHYs through
+        * the MDIO_MASTER register, also _disconnects_ the external MDC
+        * passthrough to the internal PHYs. It's not possible to use both
+        * configurations at the same time!
+        *
+        * Because this came up during the review process:
+        * If the external mdio-bus driver were capable of magically
+        * disabling QCA8K_MDIO_MASTER_EN and of mutex/spin-locking the
+        * qca8k's accessors out for the duration, it would be possible
+        * to pull this off.
+        */
+       if (!!external_mdio_mask && !!internal_mdio_mask) {
+               dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
+               return -EINVAL;
+       }
+
+       if (external_mdio_mask) {
+               /* Make sure to disable the internal mdio bus in case
+                * a dt-overlay and driver reload changed the configuration.
+                */
+
+               qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL,
+                               QCA8K_MDIO_MASTER_EN);
+               return 0;
+       }
+
+       priv->ops.phy_read = qca8k_phy_read;
+       priv->ops.phy_write = qca8k_phy_write;
+       return 0;
+}
+
 static int
 qca8k_setup(struct dsa_switch *ds)
 {
@@ -502,6 +651,10 @@ qca8k_setup(struct dsa_switch *ds)
        if (IS_ERR(priv->regmap))
                pr_warn("regmap initialization failed");
 
+       ret = qca8k_setup_mdio_bus(priv);
+       if (ret)
+               return ret;
+
        /* Initialize CPU port pad mode (xMII type, delays...) */
        phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn);
        if (phy_mode < 0) {
@@ -624,22 +777,6 @@ qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy)
        qca8k_port_set_status(priv, port, 1);
 }
 
-static int
-qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
-{
-       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
-       return mdiobus_read(priv->bus, phy, regnum);
-}
-
-static int
-qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
-{
-       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
-       return mdiobus_write(priv->bus, phy, regnum, val);
-}
-
 static void
 qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
 {
@@ -879,8 +1016,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
        .setup                  = qca8k_setup,
        .adjust_link            = qca8k_adjust_link,
        .get_strings            = qca8k_get_strings,
-       .phy_read               = qca8k_phy_read,
-       .phy_write              = qca8k_phy_write,
        .get_ethtool_stats      = qca8k_get_ethtool_stats,
        .get_sset_count         = qca8k_get_sset_count,
        .get_mac_eee            = qca8k_get_mac_eee,
@@ -923,7 +1058,8 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
                return -ENOMEM;
 
        priv->ds->priv = priv;
-       priv->ds->ops = &qca8k_switch_ops;
+       priv->ops = qca8k_switch_ops;
+       priv->ds->ops = &priv->ops;
        mutex_init(&priv->reg_mutex);
        dev_set_drvdata(&mdiodev->dev, priv);
 
index d146e54c8a6c615045ff18b31b413fba08365221..249fd62268e5450ff41fa33ef7d7ba055d35461c 100644 (file)
 #define   QCA8K_MIB_FLUSH                              BIT(24)
 #define   QCA8K_MIB_CPU_KEEP                           BIT(20)
 #define   QCA8K_MIB_BUSY                               BIT(17)
+#define QCA8K_MDIO_MASTER_CTRL                         0x3c
+#define   QCA8K_MDIO_MASTER_BUSY                       BIT(31)
+#define   QCA8K_MDIO_MASTER_EN                         BIT(30)
+#define   QCA8K_MDIO_MASTER_READ                       BIT(27)
+#define   QCA8K_MDIO_MASTER_WRITE                      0
+#define   QCA8K_MDIO_MASTER_SUP_PRE                    BIT(26)
+#define   QCA8K_MDIO_MASTER_PHY_ADDR(x)                        ((x) << 21)
+#define   QCA8K_MDIO_MASTER_REG_ADDR(x)                        ((x) << 16)
+#define   QCA8K_MDIO_MASTER_DATA(x)                    (x)
+#define   QCA8K_MDIO_MASTER_DATA_MASK                  GENMASK(15, 0)
+#define   QCA8K_MDIO_MASTER_MAX_PORTS                  5
+#define   QCA8K_MDIO_MASTER_MAX_REG                    32
 #define QCA8K_GOL_MAC_ADDR0                            0x60
 #define QCA8K_GOL_MAC_ADDR1                            0x64
 #define QCA8K_REG_PORT_STATUS(_i)                      (0x07c + (_i) * 4)
@@ -169,6 +181,7 @@ struct qca8k_priv {
        struct dsa_switch *ds;
        struct mutex reg_mutex;
        struct device *dev;
+       struct dsa_switch_ops ops;
 };
 
 struct qca8k_mib_desc {
index 808abb6b367134e76a79dd2a9bf857d02559af65..b15752267c8dfde6d40b6296154e2cd350b185e4 100644 (file)
@@ -1521,7 +1521,7 @@ static void update_stats(int ioaddr, struct net_device *dev)
 static void set_rx_mode(struct net_device *dev)
 {
        int ioaddr = dev->base_addr;
-       short new_mode;
+       unsigned short new_mode;
 
        if (dev->flags & IFF_PROMISC) {
                if (corkscrew_debug > 3)
index 342ae08ec3c29832ae5be0da8d93e59d6441cab1..d60a86aa8aa8049e7c5216f15f64b6d8406ec115 100644 (file)
@@ -153,8 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count,
 static void dayna_block_output(struct net_device *dev, int count,
                               const unsigned char *buf, int start_page);
 
-#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
-
 /* Slow Sane (16-bit chunk memory read/write); Cabletron uses this */
 static void slow_sane_get_8390_hdr(struct net_device *dev,
                                   struct e8390_pkt_hdr *hdr, int ring_page);
@@ -233,19 +231,26 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
 
 static enum mac8390_access mac8390_testio(unsigned long membase)
 {
-       unsigned long outdata = 0xA5A0B5B0;
-       unsigned long indata =  0x00000000;
+       u32 outdata = 0xA5A0B5B0;
+       u32 indata = 0;
+
        /* Try writing 32 bits */
-       memcpy_toio((void __iomem *)membase, &outdata, 4);
-       /* Now compare them */
-       if (memcmp_withio(&outdata, membase, 4) == 0)
+       nubus_writel(outdata, membase);
+       /* Now read it back */
+       indata = nubus_readl(membase);
+       if (outdata == indata)
                return ACCESS_32;
+
+       outdata = 0xC5C0D5D0;
+       indata = 0;
+
        /* Write 16 bit output */
        word_memcpy_tocard(membase, &outdata, 4);
        /* Now read it back */
        word_memcpy_fromcard(&indata, membase, 4);
        if (outdata == indata)
                return ACCESS_16;
+
        return ACCESS_UNKNOWN;
 }
 
index 74550ccc7a20ff8437463384e906b718027dc6ef..e2ffb159cbe2eeb5980a89aa688ebde8826fc7e6 100644 (file)
@@ -186,11 +186,12 @@ static void aq_rx_checksum(struct aq_ring_s *self,
        }
        if (buff->is_ip_cso) {
                __skb_incr_checksum_unnecessary(skb);
-               if (buff->is_udp_cso || buff->is_tcp_cso)
-                       __skb_incr_checksum_unnecessary(skb);
        } else {
                skb->ip_summed = CHECKSUM_NONE;
        }
+
+       if (buff->is_udp_cso || buff->is_tcp_cso)
+               __skb_incr_checksum_unnecessary(skb);
 }
 
 #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
index 9e07b469066a4bbc41119e0801d716a25dce9a55..156fbc5601ca3ece9b07c7ab14314ff4883127f7 100644 (file)
@@ -1721,7 +1721,7 @@ static void atl1_inc_smb(struct atl1_adapter *adapter)
        adapter->soft_stats.scc += smb->tx_1_col;
        adapter->soft_stats.mcc += smb->tx_2_col;
        adapter->soft_stats.latecol += smb->tx_late_col;
-       adapter->soft_stats.tx_underun += smb->tx_underrun;
+       adapter->soft_stats.tx_underrun += smb->tx_underrun;
        adapter->soft_stats.tx_trunc += smb->tx_trunc;
        adapter->soft_stats.tx_pause += smb->tx_pause;
 
@@ -3179,7 +3179,7 @@ static struct atl1_stats atl1_gstrings_stats[] = {
        {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
        {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
        {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
-       {"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
+       {"tx_underrun", ATL1_STAT(soft_stats.tx_underrun)},
        {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
        {"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
        {"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
index 34a58cd846a05929f5c22dfa46619f6240e85e5e..eacff19ea05b820bec71fdba222b7a7e263141db 100644 (file)
@@ -681,7 +681,7 @@ struct atl1_sft_stats {
        u64 scc;                /* packets TX after a single collision */
        u64 mcc;                /* packets TX after multiple collisions */
        u64 latecol;            /* TX packets w/ late collisions */
-       u64 tx_underun;         /* TX packets aborted due to TX FIFO underrun
+       u64 tx_underrun;        /* TX packets aborted due to TX FIFO underrun
                                 * or TRD FIFO underrun */
        u64 tx_trunc;           /* TX packets truncated due to size > MTU */
        u64 rx_pause;           /* num Pause packets received. */
index d99317b3d891b0a608aafef51352eea6ba58fbb2..98da0fa27192ddbab7c04651854b0fd94baa6b2a 100644 (file)
@@ -553,7 +553,7 @@ static void atl2_intr_tx(struct atl2_adapter *adapter)
                        netdev->stats.tx_aborted_errors++;
                if (txs->late_col)
                        netdev->stats.tx_window_errors++;
-               if (txs->underun)
+               if (txs->underrun)
                        netdev->stats.tx_fifo_errors++;
        } while (1);
 
index c64a6bdfa7ae4927da9dc3c38ac4ca5b8956354e..25ec84cb48535b1a7dc0180e176af77aabf0bf03 100644 (file)
@@ -260,7 +260,7 @@ struct tx_pkt_status {
        unsigned multi_col:1;
        unsigned late_col:1;
        unsigned abort_col:1;
-       unsigned underun:1;     /* current packet is aborted
+       unsigned underrun:1;    /* current packet is aborted
                                 * due to txram underrun */
        unsigned:3;             /* reserved */
        unsigned update:1;      /* always 1'b1 in tx_status_buf */
index a9bdc21873d32f31620ac169f8aff5b76cd02f7f..10ff37d6dc783b796c690a4d73bc90caa4cad931 100644 (file)
@@ -957,7 +957,7 @@ int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
        bnx2x_sample_bulletin(bp);
 
        if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
-               BNX2X_ERR("Hypervisor will dicline the request, avoiding\n");
+               BNX2X_ERR("Hypervisor will decline the request, avoiding\n");
                rc = -EINVAL;
                goto out;
        }
index 0bb9d7b3a2b622211a2d401fd81dd70f6cff4ea0..52ade133b57cf68940327aeea75eeb9ee974dd86 100644 (file)
@@ -1133,6 +1133,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
        tpa_info = &rxr->rx_tpa[agg_id];
 
        if (unlikely(cons != rxr->rx_next_cons)) {
+               netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
+                           cons, rxr->rx_next_cons);
                bnxt_sched_reset(bp, rxr);
                return;
        }
@@ -1585,15 +1587,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        }
 
        cons = rxcmp->rx_cmp_opaque;
-       rx_buf = &rxr->rx_buf_ring[cons];
-       data = rx_buf->data;
-       data_ptr = rx_buf->data_ptr;
        if (unlikely(cons != rxr->rx_next_cons)) {
                int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
 
+               netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
+                           cons, rxr->rx_next_cons);
                bnxt_sched_reset(bp, rxr);
                return rc1;
        }
+       rx_buf = &rxr->rx_buf_ring[cons];
+       data = rx_buf->data;
+       data_ptr = rx_buf->data_ptr;
        prefetch(data_ptr);
 
        misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
@@ -1610,12 +1614,18 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 
        rx_buf->data = NULL;
        if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
+               u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
+
                bnxt_reuse_rx_data(rxr, cons, data);
                if (agg_bufs)
                        bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
 
                rc = -EIO;
-               goto next_rx;
+               if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
+                       netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
+                       bnxt_sched_reset(bp, rxr);
+               }
+               goto next_rx_no_len;
        }
 
        len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
@@ -1696,12 +1706,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        rc = 1;
 
 next_rx:
-       rxr->rx_prod = NEXT_RX(prod);
-       rxr->rx_next_cons = NEXT_RX(cons);
-
        cpr->rx_packets += 1;
        cpr->rx_bytes += len;
 
+next_rx_no_len:
+       rxr->rx_prod = NEXT_RX(prod);
+       rxr->rx_next_cons = NEXT_RX(cons);
+
 next_rx_no_prod_no_len:
        *raw_cons = tmp_raw_cons;
 
@@ -5125,10 +5136,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
        for (i = 0; i < bp->tx_nr_rings; i++) {
                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
                struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
-               u32 cmpl_ring_id;
 
-               cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+                       u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
+
                        hwrm_ring_free_send_msg(bp, ring,
                                                RING_FREE_REQ_RING_TYPE_TX,
                                                close_path ? cmpl_ring_id :
@@ -5141,10 +5152,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
                struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
                u32 grp_idx = rxr->bnapi->index;
-               u32 cmpl_ring_id;
 
-               cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+                       u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+
                        hwrm_ring_free_send_msg(bp, ring,
                                                RING_FREE_REQ_RING_TYPE_RX,
                                                close_path ? cmpl_ring_id :
@@ -5163,10 +5174,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
                struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
                u32 grp_idx = rxr->bnapi->index;
-               u32 cmpl_ring_id;
 
-               cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+                       u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+
                        hwrm_ring_free_send_msg(bp, ring, type,
                                                close_path ? cmpl_ring_id :
                                                INVALID_HW_RING_ID);
@@ -5305,17 +5316,16 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
        req->num_tx_rings = cpu_to_le16(tx_rings);
        if (BNXT_NEW_RM(bp)) {
                enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+               enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
                if (bp->flags & BNXT_FLAG_CHIP_P5) {
                        enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
                        enables |= tx_rings + ring_grps ?
-                                  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-                                  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+                                  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
                        enables |= rx_rings ?
                                FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
                } else {
                        enables |= cp_rings ?
-                                  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-                                  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+                                  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
                        enables |= ring_grps ?
                                   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
                                   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
@@ -5355,14 +5365,13 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
        enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
        enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
                              FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+       enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
        if (bp->flags & BNXT_FLAG_CHIP_P5) {
                enables |= tx_rings + ring_grps ?
-                          FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-                          FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+                          FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
        } else {
                enables |= cp_rings ?
-                          FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-                          FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+                          FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
                enables |= ring_grps ?
                           FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
        }
@@ -6743,6 +6752,7 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
        struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
        struct hwrm_port_qstats_ext_input req = {0};
        struct bnxt_pf_info *pf = &bp->pf;
+       u32 tx_stat_size;
        int rc;
 
        if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
@@ -6752,13 +6762,16 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
        req.port_id = cpu_to_le16(pf->port_id);
        req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
        req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
-       req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext));
+       tx_stat_size = bp->hw_tx_port_stats_ext ?
+                      sizeof(*bp->hw_tx_port_stats_ext) : 0;
+       req.tx_stat_size = cpu_to_le16(tx_stat_size);
        req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
-               bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8;
+               bp->fw_tx_stats_ext_size = tx_stat_size ?
+                       le16_to_cpu(resp->tx_stat_size) / 8 : 0;
        } else {
                bp->fw_rx_stats_ext_size = 0;
                bp->fw_tx_stats_ext_size = 0;
@@ -8951,8 +8964,15 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
 
 skip_uc:
        rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+       if (rc && vnic->mc_list_count) {
+               netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
+                           rc);
+               vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+               vnic->mc_list_count = 0;
+               rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+       }
        if (rc)
-               netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
+               netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
                           rc);
 
        return rc;
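Falling back to ALL_MCAST when exact multicast filter programming fails
trades filtering precision for correctness: the NIC then accepts every
multicast frame and the stack discards unwanted ones in software, which is
strictly better than silently dropping traffic for subscribed groups. Note
that the retry clears mc_list_count first, so the second
bnxt_hwrm_cfa_l2_set_rx_mask() call programs only the all-multicast mask.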
@@ -10675,6 +10695,7 @@ init_err_cleanup_tc:
        bnxt_clear_int_mode(bp);
 
 init_err_pci_clean:
+       bnxt_free_hwrm_short_cmd_req(bp);
        bnxt_free_hwrm_resources(bp);
        bnxt_free_ctx_mem(bp);
        kfree(bp->ctx);
index 328373e0578ff83bf5d5bb336103deba33144e99..060a6f386104ac5511a381a9ef308f956fc6f551 100644 (file)
@@ -4283,7 +4283,7 @@ static void tg3_power_down(struct tg3 *tp)
        pci_set_power_state(tp->pdev, PCI_D3hot);
 }
 
-static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
+static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
 {
        switch (val & MII_TG3_AUX_STAT_SPDMASK) {
        case MII_TG3_AUX_STAT_10HALF:
@@ -4787,7 +4787,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
        bool current_link_up;
        u32 bmsr, val;
        u32 lcl_adv, rmt_adv;
-       u16 current_speed;
+       u32 current_speed;
        u8 current_duplex;
        int i, err;
 
@@ -5719,7 +5719,7 @@ out:
 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
 {
        u32 orig_pause_cfg;
-       u16 orig_active_speed;
+       u32 orig_active_speed;
        u8 orig_active_duplex;
        u32 mac_status;
        bool current_link_up;
@@ -5823,7 +5823,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
 {
        int err = 0;
        u32 bmsr, bmcr;
-       u16 current_speed = SPEED_UNKNOWN;
+       u32 current_speed = SPEED_UNKNOWN;
        u8 current_duplex = DUPLEX_UNKNOWN;
        bool current_link_up = false;
        u32 local_adv, remote_adv, sgsr;
index a772a33b685c5eb8c28137107eb33cb4b6ffeb1d..6953d0546acb320196887a51d07312977d4f49ed 100644 (file)
@@ -2873,7 +2873,7 @@ struct tg3_tx_ring_info {
 struct tg3_link_config {
        /* Describes what we're trying to get. */
        u32                             advertising;
-       u16                             speed;
+       u32                             speed;
        u8                              duplex;
        u8                              autoneg;
        u8                              flowctrl;
@@ -2882,7 +2882,7 @@ struct tg3_link_config {
        u8                              active_flowctrl;
 
        u8                              active_duplex;
-       u16                             active_speed;
+       u32                             active_speed;
        u32                             rmt_adv;
 };
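The u16 -> u32 conversion across tg3 is not cosmetic: ethtool's
SPEED_UNKNOWN is defined as -1, so storing it in a u16 truncates it to
0xffff and later comparisons against SPEED_UNKNOWN quietly fail (likewise,
modern speed values such as 100000 Mb/s no longer fit in 16 bits). A
two-line illustration of the truncation trap:

        u16 speed16 = SPEED_UNKNOWN;            /* -1 truncated to 0xffff */
        bool same = (speed16 == SPEED_UNKNOWN); /* false: 65535 != -1 */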
 
index ad099fd01b45ae947492e828337c76df6d701587..3da2795e248638abbc8d50a194dd8061325aad41 100644 (file)
@@ -898,7 +898,9 @@ static void macb_tx_interrupt(struct macb_queue *queue)
 
                        /* First, update TX stats if needed */
                        if (skb) {
-                               if (gem_ptp_do_txstamp(queue, skb, desc) == 0) {
+                               if (unlikely(skb_shinfo(skb)->tx_flags &
+                                            SKBTX_HW_TSTAMP) &&
+                                   gem_ptp_do_txstamp(queue, skb, desc) == 0) {
                                        /* skb now belongs to timestamp buffer
                                         * and will be removed later
                                         */
@@ -3370,14 +3372,20 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
                *hclk = devm_clk_get(&pdev->dev, "hclk");
        }
 
-       if (IS_ERR(*pclk)) {
+       if (IS_ERR_OR_NULL(*pclk)) {
                err = PTR_ERR(*pclk);
+               if (!err)
+                       err = -ENODEV;
+
                dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
                return err;
        }
 
-       if (IS_ERR(*hclk)) {
+       if (IS_ERR_OR_NULL(*hclk)) {
                err = PTR_ERR(*hclk);
+               if (!err)
+                       err = -ENODEV;
+
                dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
                return err;
        }
index aa2be480719134f720e9487a3c71b4272cc8efe3..c032bef1b776d74ea4886e8fbddca40c8b7dd868 100644 (file)
 #define DRV_NAME       "nicvf"
 #define DRV_VERSION    "1.0"
 
+/* NOTE: Packets bigger than 1530 bytes are split across multiple pages and
+ * XDP needs the buffer to be contiguous. Allow XDP to be set up only if we
+ * don't exceed this value, keeping headroom for the 14-byte Ethernet header
+ * and two VLAN tags (for QinQ).
+ */
+#define MAX_XDP_MTU    (1530 - ETH_HLEN - VLAN_HLEN * 2)
+
 /* Supported devices */
 static const struct pci_device_id nicvf_id_table[] = {
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
@@ -1328,10 +1335,11 @@ int nicvf_stop(struct net_device *netdev)
        struct nicvf_cq_poll *cq_poll = NULL;
        union nic_mbx mbx = {};
 
-       cancel_delayed_work_sync(&nic->link_change_work);
-
        /* wait till all queued set_rx_mode tasks completes */
-       drain_workqueue(nic->nicvf_rx_mode_wq);
+       if (nic->nicvf_rx_mode_wq) {
+               cancel_delayed_work_sync(&nic->link_change_work);
+               drain_workqueue(nic->nicvf_rx_mode_wq);
+       }
 
        mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
        nicvf_send_msg_to_pf(nic, &mbx);
@@ -1452,7 +1460,8 @@ int nicvf_open(struct net_device *netdev)
        struct nicvf_cq_poll *cq_poll = NULL;
 
        /* wait till all queued set_rx_mode tasks completes if any */
-       drain_workqueue(nic->nicvf_rx_mode_wq);
+       if (nic->nicvf_rx_mode_wq)
+               drain_workqueue(nic->nicvf_rx_mode_wq);
 
        netif_carrier_off(netdev);
 
@@ -1550,10 +1559,12 @@ int nicvf_open(struct net_device *netdev)
        /* Send VF config done msg to PF */
        nicvf_send_cfg_done(nic);
 
-       INIT_DELAYED_WORK(&nic->link_change_work,
-                         nicvf_link_status_check_task);
-       queue_delayed_work(nic->nicvf_rx_mode_wq,
-                          &nic->link_change_work, 0);
+       if (nic->nicvf_rx_mode_wq) {
+               INIT_DELAYED_WORK(&nic->link_change_work,
+                                 nicvf_link_status_check_task);
+               queue_delayed_work(nic->nicvf_rx_mode_wq,
+                                  &nic->link_change_work, 0);
+       }
 
        return 0;
 cleanup:
@@ -1578,6 +1589,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
        struct nicvf *nic = netdev_priv(netdev);
        int orig_mtu = netdev->mtu;
 
+       /* For now just support only the usual MTU-sized frames,
+        * plus some headroom for VLAN, QinQ.
+        */
+       if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) {
+               netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+                           netdev->mtu);
+               return -EINVAL;
+       }
+
        netdev->mtu = new_mtu;
 
        if (!netif_running(netdev))
@@ -1826,8 +1846,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
        bool bpf_attached = false;
        int ret = 0;
 
-       /* For now just support only the usual MTU sized frames */
-       if (prog && (dev->mtu > 1500)) {
+       /* For now just support only the usual MTU-sized frames,
+        * plus some headroom for VLAN, QinQ.
+        */
+       if (prog && dev->mtu > MAX_XDP_MTU) {
                netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
                            dev->mtu);
                return -EOPNOTSUPP;
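The MAX_XDP_MTU bound works out to 1530 - 14 (ETH_HLEN) - 2 * 4 (VLAN_HLEN)
= 1508 bytes. Sharing one define between nicvf_change_mtu() and
nicvf_xdp_setup() also closes an ordering hole: previously only the XDP
attach path checked the (hardcoded 1500) limit, so attaching a program
first and raising the MTU afterwards could bypass it.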
index 5b4d3badcb730b1417739d508bae8b3838afaaf9..e246f9733bb89161ceb2c6d39fcbe330b469ca61 100644 (file)
@@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
        /* Check if page can be recycled */
        if (page) {
                ref_count = page_ref_count(page);
-               /* Check if this page has been used once i.e 'put_page'
-                * called after packet transmission i.e internal ref_count
-                * and page's ref_count are equal i.e page can be recycled.
+               /* This page can be recycled if internal ref_count and page's
+                * ref_count are equal, indicating that the page has been used
+                * once for packet transmission. For non-XDP mode, internal
+                * ref_count is always '1'.
                 */
-               if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
-                       pgcache->ref_count--;
-               else
-                       page = NULL;
-
-               /* In non-XDP mode, page's ref_count needs to be '1' for it
-                * to be recycled.
-                */
-               if (!rbdr->is_xdp && (ref_count != 1))
+               if (rbdr->is_xdp) {
+                       if (ref_count == pgcache->ref_count)
+                               pgcache->ref_count--;
+                       else
+                               page = NULL;
+               } else if (ref_count != 1) {
                        page = NULL;
+               }
        }
 
        if (!page) {
@@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
        while (head < rbdr->pgcnt) {
                pgcache = &rbdr->pgcache[head];
                if (pgcache->page && page_ref_count(pgcache->page) != 0) {
-                       if (!rbdr->is_xdp) {
-                               put_page(pgcache->page);
-                               continue;
+                       if (rbdr->is_xdp) {
+                               page_ref_sub(pgcache->page,
+                                            pgcache->ref_count - 1);
                        }
-                       page_ref_sub(pgcache->page, pgcache->ref_count - 1);
                        put_page(pgcache->page);
                }
                head++;
index 3130b43bba52c9570e76223bf2779c3f3c076c34..02959035ed3f21287a3673f93c55f0e76b549de1 100644 (file)
@@ -2620,7 +2620,7 @@ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
        }
 
        /* should never happen! */
-       BUG_ON(1);
+       BUG();
        return NULL;
 }
 
index 88773ca58e6b1fc45dce1eeea8064174b67407d9..b3da81e90132fd74d26b007ca5414a066547774f 100644 (file)
@@ -476,7 +476,7 @@ static inline int get_buf_size(struct adapter *adapter,
                break;
 
        default:
-               BUG_ON(1);
+               BUG();
        }
 
        return buf_size;
index 74849be5f004f59552892cf642a9b02efb393ac7..e2919005ead3e1592999b140841b0234344e87b7 100644 (file)
@@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
                ppmax = max;
 
        /* pool size must be a multiple of the unsigned long bit width */
-       bmap = BITS_TO_LONGS(ppmax);
+       bmap = ppmax / BITS_PER_TYPE(unsigned long);
+       if (!bmap)
+               return NULL;
+
        ppmax = (bmap * sizeof(unsigned long)) << 3;
 
        alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
@@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
        if (reserve_factor) {
                ppmax_pool = ppmax / reserve_factor;
                pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
+               if (!pool) {
+                       ppmax_pool = 0;
+                       reserve_factor = 0;
+               }
 
                pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
                         ndev->name, ppmax, ppmax_pool, pool_index_max);
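The sizing bug fixed above comes from BITS_TO_LONGS() rounding up: ppmax
was then recomputed from the rounded bitmap size, silently inflating the
pool. For example, with 60 requested entries on a 64-bit machine,
BITS_TO_LONGS(60) is 1, and ppmax was recomputed as 1 * 64 = 64, four more
than requested. Rounding down instead (and returning NULL when the result
is zero) keeps the per-CPU pool within the caller's budget, and
cxgbi_ppm_init() now copes with that by dropping back to reserve_factor = 0.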
index 2ba49e959c3fd391115740988ae44b3c0698b4d5..dc339dc1adb21c30224fbce6eb0d60fd861c9388 100644 (file)
@@ -815,6 +815,14 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
         */
        queue_mapping = skb_get_queue_mapping(skb);
        fq = &priv->fq[queue_mapping];
+
+       fd_len = dpaa2_fd_get_len(&fd);
+       nq = netdev_get_tx_queue(net_dev, queue_mapping);
+       netdev_tx_sent_queue(nq, fd_len);
+
+       /* Everything that happens after this enqueue might race with
+        * the Tx confirmation callback for this frame.
+        */
        for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
                err = priv->enqueue(priv, fq, &fd, 0);
                if (err != -EBUSY)
@@ -825,13 +833,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
                percpu_stats->tx_errors++;
                /* Clean up everything, including freeing the skb */
                free_tx_fd(priv, fq, &fd, false);
+               netdev_tx_completed_queue(nq, 1, fd_len);
        } else {
-               fd_len = dpaa2_fd_get_len(&fd);
                percpu_stats->tx_packets++;
                percpu_stats->tx_bytes += fd_len;
-
-               nq = netdev_get_tx_queue(net_dev, queue_mapping);
-               netdev_tx_sent_queue(nq, fd_len);
        }
 
        return NETDEV_TX_OK;
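Moving netdev_tx_sent_queue() ahead of the enqueue loop closes a
byte-queue-limits accounting race: once the frame has been enqueued, the
Tx confirmation path may run netdev_tx_completed_queue() for it on another
CPU, so the "sent" side must already be recorded by then. The error path
then has to undo the accounting itself, since no confirmation will ever
arrive. Schematically (enqueue_frame() is an illustrative stand-in for the
driver's enqueue retry loop):

        netdev_tx_sent_queue(nq, fd_len);       /* account before enqueue */
        err = enqueue_frame();                  /* confirmation may race from here on */
        if (err)
                netdev_tx_completed_queue(nq, 1, fd_len);       /* undo */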
@@ -1817,7 +1822,7 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
        dpaa2_fd_set_format(&fd, dpaa2_fd_single);
        dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
 
-       fq = &priv->fq[smp_processor_id()];
+       fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
        for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
                err = priv->enqueue(priv, fq, &fd, 0);
                if (err != -EBUSY)
index 697c2427f2b70c06c87dd00ae23d4e0b06d4fc3d..a96ad20ee4843e9cdd02c55a3e7286a51679efea 100644 (file)
@@ -1840,13 +1840,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
        int ret;
 
        if (enable) {
-               ret = clk_prepare_enable(fep->clk_ahb);
-               if (ret)
-                       return ret;
-
                ret = clk_prepare_enable(fep->clk_enet_out);
                if (ret)
-                       goto failed_clk_enet_out;
+                       return ret;
 
                if (fep->clk_ptp) {
                        mutex_lock(&fep->ptp_clk_mutex);
@@ -1866,7 +1862,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 
                phy_reset_after_clk_enable(ndev->phydev);
        } else {
-               clk_disable_unprepare(fep->clk_ahb);
                clk_disable_unprepare(fep->clk_enet_out);
                if (fep->clk_ptp) {
                        mutex_lock(&fep->ptp_clk_mutex);
@@ -1885,8 +1880,6 @@ failed_clk_ref:
 failed_clk_ptp:
        if (fep->clk_enet_out)
                clk_disable_unprepare(fep->clk_enet_out);
-failed_clk_enet_out:
-               clk_disable_unprepare(fep->clk_ahb);
 
        return ret;
 }
@@ -3470,6 +3463,9 @@ fec_probe(struct platform_device *pdev)
        ret = clk_prepare_enable(fep->clk_ipg);
        if (ret)
                goto failed_clk_ipg;
+       ret = clk_prepare_enable(fep->clk_ahb);
+       if (ret)
+               goto failed_clk_ahb;
 
        fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
        if (!IS_ERR(fep->reg_phy)) {
@@ -3563,6 +3559,9 @@ failed_reset:
        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 failed_regulator:
+       clk_disable_unprepare(fep->clk_ahb);
+failed_clk_ahb:
+       clk_disable_unprepare(fep->clk_ipg);
 failed_clk_ipg:
        fec_enet_clk_enable(ndev, false);
 failed_clk:
@@ -3686,6 +3685,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev)
        struct net_device *ndev = dev_get_drvdata(dev);
        struct fec_enet_private *fep = netdev_priv(ndev);
 
+       clk_disable_unprepare(fep->clk_ahb);
        clk_disable_unprepare(fep->clk_ipg);
 
        return 0;
@@ -3695,8 +3695,20 @@ static int __maybe_unused fec_runtime_resume(struct device *dev)
 {
        struct net_device *ndev = dev_get_drvdata(dev);
        struct fec_enet_private *fep = netdev_priv(ndev);
+       int ret;
 
-       return clk_prepare_enable(fep->clk_ipg);
+       ret = clk_prepare_enable(fep->clk_ahb);
+       if (ret)
+               return ret;
+       ret = clk_prepare_enable(fep->clk_ipg);
+       if (ret)
+               goto failed_clk_ipg;
+
+       return 0;
+
+failed_clk_ipg:
+       clk_disable_unprepare(fep->clk_ahb);
+       return ret;
 }
 
 static const struct dev_pm_ops fec_pm_ops = {
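Moving clk_ahb out of fec_enet_clk_enable() and into the runtime-PM hooks,
alongside clk_ipg which already lived there, ties the register-bus clock
to the device's runtime-PM state rather than to the interface's up/down
state, and the probe error path gains matching clk_disable_unprepare()
calls so a failed probe cannot leave the AHB clock running. Note the
unwind ordering in fec_runtime_resume(): clocks are enabled in sequence
and disabled in reverse on failure, so a partial resume leaves nothing
enabled.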
index 79d03f8ee7b180d2cab9a2a647254461c0a0cb08..c7fa97a7e1f4d4b07dd6b00f7c5c7bffca4a0356 100644 (file)
@@ -150,7 +150,6 @@ out_buffer_fail:
 /* free desc along with its attached buffer */
 static void hnae_free_desc(struct hnae_ring *ring)
 {
-       hnae_free_buffers(ring);
        dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
                         ring->desc_num * sizeof(ring->desc[0]),
                         ring_to_dma_dir(ring));
@@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring)
 /* fini ring, also free the buffer for the ring */
 static void hnae_fini_ring(struct hnae_ring *ring)
 {
+       if (is_rx_ring(ring))
+               hnae_free_buffers(ring);
+
        hnae_free_desc(ring);
        kfree(ring->desc_cb);
        ring->desc_cb = NULL;
index 08a750fb60c49d397c61845130e153fe1e3b0b3e..d6fb8343723041992d12cfa505f222822bd603fd 100644 (file)
@@ -357,7 +357,7 @@ struct hnae_buf_ops {
 };
 
 struct hnae_queue {
-       void __iomem *io_base;
+       u8 __iomem *io_base;
        phys_addr_t phy_base;
        struct hnae_ae_dev *dev;        /* the device who use this queue */
        struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp;
index a97228c93831d69fe2211317486f14a25197e740..6c0507921623b843bb2010321782ace35a24b8a3 100644 (file)
@@ -370,7 +370,7 @@ int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
 static void hns_mac_param_get(struct mac_params *param,
                              struct hns_mac_cb *mac_cb)
 {
-       param->vaddr = (void *)mac_cb->vaddr;
+       param->vaddr = mac_cb->vaddr;
        param->mac_mode = hns_get_enet_interface(mac_cb);
        ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr);
        param->mac_id = mac_cb->mac_id;
index fbc75341bef760b82a1d7a10469d5649db91e366..22589799f1a575127f77f733e9e21a28889efa46 100644 (file)
@@ -187,7 +187,7 @@ struct mac_statistics {
 /*mac para struct ,mac get param from nic or dsaf when initialize*/
 struct mac_params {
        char addr[ETH_ALEN];
-       void *vaddr; /*virtual address*/
+       u8 __iomem *vaddr; /*virtual address*/
        struct device *dev;
        u8 mac_id;
        /**< Ethernet operation mode (MAC-PHY interface and speed) */
@@ -402,7 +402,7 @@ struct mac_driver {
        enum mac_mode mac_mode;
        u8 mac_id;
        struct hns_mac_cb *mac_cb;
-       void __iomem *io_base;
+       u8 __iomem *io_base;
        unsigned int mac_en_flg; /* do not enable the MAC twice */
        unsigned int virt_dev_num;
        struct device *dev;
index ac55db065f167ad58f9ec41966afa7b0299b5f40..61eea6ac846fcfce9d254351221b998dd20a6e47 100644 (file)
@@ -1602,8 +1602,6 @@ static void hns_dsaf_set_mac_key(
                       DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
        dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
                       DSAF_TBL_TCAM_KEY_PORT_S, port);
-
-       mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan);
 }
 
 /**
@@ -1663,8 +1661,8 @@ int hns_dsaf_set_mac_uc_entry(
        /* default config dvc to 0 */
        mac_data.tbl_ucast_dvc = 0;
        mac_data.tbl_ucast_out_port = mac_entry->port_num;
-       tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
-       tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+       tcam_data.tbl_tcam_data_high = mac_key.high.val;
+       tcam_data.tbl_tcam_data_low = mac_key.low.val;
 
        hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data);
 
@@ -1786,9 +1784,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
                                     0xff,
                                     mc_mask);
 
-               mask_key.high.val = le32_to_cpu(mask_key.high.val);
-               mask_key.low.val = le32_to_cpu(mask_key.low.val);
-
                pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
        }
 
@@ -1840,8 +1835,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
                dsaf_dev->ae_dev.name, mac_key.high.val,
                mac_key.low.val, entry_index);
 
-       tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
-       tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+       tcam_data.tbl_tcam_data_high = mac_key.high.val;
+       tcam_data.tbl_tcam_data_low = mac_key.low.val;
 
        /* config mc entry with mask */
        hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data,
@@ -1956,9 +1951,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
                /* config key mask */
                hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask);
 
-               mask_key.high.val = le32_to_cpu(mask_key.high.val);
-               mask_key.low.val = le32_to_cpu(mask_key.low.val);
-
                pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
        }
 
@@ -2012,8 +2004,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
                soft_mac_entry += entry_index;
                soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
        } else { /* not zero, just del port, update */
-               tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
-               tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+               tcam_data.tbl_tcam_data_high = mac_key.high.val;
+               tcam_data.tbl_tcam_data_low = mac_key.low.val;
 
                hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
                                     &tcam_data,
@@ -2750,6 +2742,17 @@ int hns_dsaf_get_regs_count(void)
        return DSAF_DUMP_REGS_NUM;
 }
 
+static int hns_dsaf_get_port_id(u8 port)
+{
+       if (port < DSAF_SERVICE_NW_NUM)
+               return port;
+
+       if (port >= DSAF_BASE_INNER_PORT_NUM)
+               return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+
+       return -EINVAL;
+}
+
 static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
 {
        struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
@@ -2815,23 +2818,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
        memset(&temp_key, 0x0, sizeof(temp_key));
        mask_entry.addr[0] = 0x01;
        hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
-                            port, mask_entry.addr);
+                            0xf, mask_entry.addr);
        tbl_tcam_mcast.tbl_mcast_item_vld = 1;
        tbl_tcam_mcast.tbl_mcast_old_en = 0;
 
-       if (port < DSAF_SERVICE_NW_NUM) {
-               mskid = port;
-       } else if (port >= DSAF_BASE_INNER_PORT_NUM) {
-               mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
-       } else {
+       /* set MAC port to handle multicast */
+       mskid = hns_dsaf_get_port_id(port);
+       if (mskid == -EINVAL) {
                dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
                        dsaf_dev->ae_dev.name, port,
                        mask_key.high.val, mask_key.low.val);
                return;
        }
+       dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+                    mskid % 32, 1);
 
+       /* set pool bit map to handle multicast */
+       mskid = hns_dsaf_get_port_id(port_num);
+       if (mskid == -EINVAL) {
+               dev_err(dsaf_dev->dev,
+                       "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n",
+                       dsaf_dev->ae_dev.name, port_num,
+                       mask_key.high.val, mask_key.low.val);
+               return;
+       }
        dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
                     mskid % 32, 1);
+
        memcpy(&temp_key, &mask_key, sizeof(mask_key));
        hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
                                   (struct dsaf_tbl_tcam_data *)(&mask_key),
index 0e1cd99831a6083faa790aa80be1f6c635b15a50..76cc8887e1a83599c9178e88787ecb6c6a2b8784 100644 (file)
@@ -467,4 +467,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
                             u8 mac_id, u8 port_num);
 int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
 
+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
+
 #endif /* __HNS_DSAF_MAIN_H__ */
index 16294cd3c95459891c65080cd49c61f839afaa60..19b94879691f86e0ec969e40ed5a057cb24c3d7b 100644 (file)
@@ -670,7 +670,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
                dsaf_set_field(origin, 1ull << 10, 10, en);
                dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
        } else {
-               u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
+               u8 __iomem *base_addr = mac_cb->serdes_vaddr +
                                (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
                dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
        }
index 3d07c8a7639dad46c5b810b19f56ba84fdfc655a..17c019106e6e40d8f281deb87b1928367582e371 100644 (file)
@@ -61,7 +61,7 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
        }
 }
 
-static void __iomem *
+static u8 __iomem *
 hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
 {
        return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
@@ -111,8 +111,8 @@ hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
        dsaf_dev->ppe_common[comm_index] = NULL;
 }
 
-static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
-                                       int ppe_idx)
+static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
+                                     int ppe_idx)
 {
        return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
 }
index f670e63a5a018cd5b48b4a62093c104905aa4463..110c6e8222c7038a6eb24c1854490845293e320d 100644 (file)
@@ -80,7 +80,7 @@ struct hns_ppe_cb {
        struct hns_ppe_hw_stats hw_stats;
 
        u8 index;       /* index in a ppe common device */
-       void __iomem *io_base;
+       u8 __iomem *io_base;
        int virq;
        u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */
        u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */
@@ -89,7 +89,7 @@ struct hns_ppe_cb {
 struct ppe_common_cb {
        struct device *dev;
        struct dsaf_device *dsaf_dev;
-       void __iomem *io_base;
+       u8 __iomem *io_base;
 
        enum ppe_common_mode ppe_mode;
 
index 6bf346c11b25a5c87bceca81abf1bfc83188d768..ac3518ca4d7bec5b3737cef78081202dfcf1f53d 100644 (file)
@@ -458,7 +458,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
                mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
        } else {
                ring = &q->tx_ring;
-               ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
+               ring->io_base = ring_pair_cb->q.io_base +
                        HNS_RCB_TX_REG_OFFSET;
                irq_idx = HNS_RCB_IRQ_IDX_TX;
                mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
@@ -764,7 +764,7 @@ static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
        }
 }
 
-static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
+static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
 {
        struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
 
index b9733b0b848263bc9a25ccf42f3d8b433a7b9e5e..b9e7f11f08968099c76a99c3f119e77a909c72ea 100644 (file)
 #define XGMAC_PAUSE_CTL_RSP_MODE_B     2
 #define XGMAC_PAUSE_CTL_TX_XOFF_B      3
 
-static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
+static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value)
 {
        writel(value, base + reg);
 }
@@ -1053,7 +1053,7 @@ static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
 #define dsaf_set_bit(origin, shift, val) \
        dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
 
-static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask,
                                      u32 shift, u32 val)
 {
        u32 origin = dsaf_read_reg(base, reg);
@@ -1073,7 +1073,7 @@ static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
 #define dsaf_get_bit(origin, shift) \
        dsaf_get_field((origin), (1ull << (shift)), (shift))
 
-static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask,
                                     u32 shift)
 {
        u32 origin;
@@ -1089,11 +1089,11 @@ static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
        dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit))
 
 #define dsaf_write_b(addr, data)\
-       writeb((data), (__iomem unsigned char *)(addr))
+       writeb((data), (__iomem u8 *)(addr))
 #define dsaf_read_b(addr)\
-       readb((__iomem unsigned char *)(addr))
+       readb((__iomem u8 *)(addr))
 
 #define hns_mac_reg_read64(drv, offset) \
-       readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset))))
+       readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset))))
 
 #endif /* _DSAF_REG_H */
index ba4316910dea1726da855c13b78e95bb6bd36a3c..a60f207768fc7152edbbf9a5394afaa080260d8b 100644 (file)
@@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
        dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
        dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
        dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
-       dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
+       dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
 }
 
 /**
index 60e7d7ae3787c280d9b21319a1493ccc5d570de8..4cd86ba1f050dcf786238c147cd2fc3b799797bc 100644 (file)
@@ -29,9 +29,6 @@
 
 #define SERVICE_TIMER_HZ (1 * HZ)
 
-#define NIC_TX_CLEAN_MAX_NUM 256
-#define NIC_RX_CLEAN_MAX_NUM 64
-
 #define RCB_IRQ_NOT_INITED 0
 #define RCB_IRQ_INITED 1
 #define HNS_BUFFER_SIZE_2048 2048
@@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
        wmb(); /* commit all data before submit */
        assert(skb->queue_mapping < priv->ae_handle->q_num);
        hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
-       ring->stats.tx_pkts++;
-       ring->stats.tx_bytes += skb->len;
 
        return NETDEV_TX_OK;
 
@@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
                /* issue prefetch for next Tx descriptor */
                prefetch(&ring->desc_cb[ring->next_to_clean]);
        }
+       /* update tx ring statistics. */
+       ring->stats.tx_pkts += pkts;
+       ring->stats.tx_bytes += bytes;
 
        NETIF_TX_UNLOCK(ring);
 
@@ -2152,7 +2150,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
                        hns_nic_tx_fini_pro_v2;
 
                netif_napi_add(priv->netdev, &rd->napi,
-                              hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
+                              hns_nic_common_poll, NAPI_POLL_WEIGHT);
                rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
        }
        for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2165,7 +2163,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
                        hns_nic_rx_fini_pro_v2;
 
                netif_napi_add(priv->netdev, &rd->napi,
-                              hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
+                              hns_nic_common_poll, NAPI_POLL_WEIGHT);
                rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
        }
 
index 1c1f17ec6be2dfa11cd1208b0b04273f07855c10..162cb9afa0e705d1e7d668c1cf34eae64f4d65f2 100644 (file)
@@ -22,6 +22,7 @@
 #include "hns3_enet.h"
 
 #define hns3_set_field(origin, shift, val)     ((origin) |= ((val) << (shift)))
+#define hns3_tx_bd_count(S)    DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
 
 static void hns3_clear_all_ring(struct hnae3_handle *h);
 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
@@ -1079,7 +1080,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 
        desc_cb->length = size;
 
-       frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET;
+       frag_buf_num = hns3_tx_bd_count(size);
        sizeoflast = size & HNS3_TX_LAST_SIZE_M;
        sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
 
@@ -1124,14 +1125,13 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
        int i;
 
        size = skb_headlen(skb);
-       buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET;
+       buf_num = hns3_tx_bd_count(size);
 
        frag_num = skb_shinfo(skb)->nr_frags;
        for (i = 0; i < frag_num; i++) {
                frag = &skb_shinfo(skb)->frags[i];
                size = skb_frag_size(frag);
-               bdnum_for_frag = (size + HNS3_MAX_BD_SIZE - 1) >>
-                                HNS3_MAX_BD_SIZE_OFFSET;
+               bdnum_for_frag = hns3_tx_bd_count(size);
                if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG))
                        return -ENOMEM;
 
@@ -1139,8 +1139,7 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
        }
 
        if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
-               buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) >>
-                         HNS3_MAX_BD_SIZE_OFFSET;
+               buf_num = hns3_tx_bd_count(skb->len);
                if (ring_space(ring) < buf_num)
                        return -EBUSY;
                /* manually split the packet to send */
@@ -1169,7 +1168,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
        buf_num = skb_shinfo(skb)->nr_frags + 1;
 
        if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
-               buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
+               buf_num = hns3_tx_bd_count(skb->len);
                if (ring_space(ring) < buf_num)
                        return -EBUSY;
                /* manually split the packet to send */
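The new hns3_tx_bd_count() macro is more than a cleanup: HNS3_MAX_BD_SIZE is 65535, but the old right shift by HNS3_MAX_BD_SIZE_OFFSET (16) divided by 65536 and could undercount buffer descriptors for sizes near the limit. A minimal standalone sketch, using a hypothetical fragment size:

#include <stdio.h>

#define HNS3_MAX_BD_SIZE 65535
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)

int main(void)
{
	unsigned int size = 65536;	/* one byte past a single BD */
	unsigned int n_old = (size + HNS3_MAX_BD_SIZE - 1) >> 16;
	unsigned int n_new = hns3_tx_bd_count(size);

	/* the old shift says 1 BD, but 65536 bytes need two BDs of <= 65535 */
	printf("old=%u new=%u\n", n_old, n_new);	/* old=1 new=2 */
	return 0;
}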
index 1db0bd41d20961f931f464850b8e5f1395276f76..75669cd0c31145fd763959f226175452dbb399bf 100644 (file)
@@ -193,7 +193,6 @@ enum hns3_nic_state {
 #define HNS3_VECTOR_INITED                     1
 
 #define HNS3_MAX_BD_SIZE                       65535
-#define HNS3_MAX_BD_SIZE_OFFSET                16
 #define HNS3_MAX_BD_PER_FRAG                   8
 #define HNS3_MAX_BD_PER_PKT                    MAX_SKB_FRAGS
 
index fffe8c1c45d394b2a0723443582d6427ffa6e387..0fb61d440d3bb96a1d5b24b4c5380b3b0c33de4e 100644 (file)
@@ -3,7 +3,7 @@
 # Makefile for the HISILICON network device drivers.
 #
 
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
 
 obj-$(CONFIG_HNS3_HCLGE) += hclge.o
 hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o  hclge_debugfs.o
index fb93bbd358455a735880d6e83cd314c0773a636a..6193f8fa7cf34aa142f575ca903f42d666847f31 100644 (file)
@@ -3,7 +3,7 @@
 # Makefile for the HISILICON network device drivers.
 #
 
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
 
 obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
 hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file
index baf5cc251f3299499f3fc03ee17513aa740110f6..8b8a7d00e8e0c92d23a9ca67683ce98a0417fddc 100644 (file)
@@ -39,7 +39,7 @@ struct hns_mdio_sc_reg {
 };
 
 struct hns_mdio_device {
-       void *vbase;            /* mdio reg base address */
+       u8 __iomem *vbase;              /* mdio reg base address */
        struct regmap *subctrl_vbase;
        struct hns_mdio_sc_reg sc_reg;
 };
@@ -96,21 +96,17 @@ enum mdio_c45_op_seq {
 #define MDIO_SC_CLK_ST         0x531C
 #define MDIO_SC_RESET_ST       0x5A1C
 
-static void mdio_write_reg(void *base, u32 reg, u32 value)
+static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value)
 {
-       u8 __iomem *reg_addr = (u8 __iomem *)base;
-
-       writel_relaxed(value, reg_addr + reg);
+       writel_relaxed(value, base + reg);
 }
 
 #define MDIO_WRITE_REG(a, reg, value) \
        mdio_write_reg((a)->vbase, (reg), (value))
 
-static u32 mdio_read_reg(void *base, u32 reg)
+static u32 mdio_read_reg(u8 __iomem *base, u32 reg)
 {
-       u8 __iomem *reg_addr = (u8 __iomem *)base;
-
-       return readl_relaxed(reg_addr + reg);
+       return readl_relaxed(base + reg);
 }
 
 #define mdio_set_field(origin, mask, shift, val) \
@@ -121,7 +117,7 @@ static u32 mdio_read_reg(void *base, u32 reg)
 
 #define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask))
 
-static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
+static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift,
                               u32 val)
 {
        u32 origin = mdio_read_reg(base, reg);
@@ -133,7 +129,7 @@ static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
 #define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \
        mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val))
 
-static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift)
 {
        u32 origin;
 
index 3baabdc897262698ab23b4bc1dedec22edc89919..90b62c1412c8f4715eaf1ab3ca14a9128f1f9046 100644 (file)
@@ -3160,6 +3160,7 @@ static ssize_t ehea_probe_port(struct device *dev,
 
        if (ehea_add_adapter_mr(adapter)) {
                pr_err("creating MR failed\n");
+               of_node_put(eth_dn);
                return -EIO;
        }
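The of_node_put() added above closes a device_node refcount leak on the error path. A minimal sketch of the rule it enforces, assuming a hypothetical setup_port() helper:

#include <linux/of.h>

static int probe_one_port(phandle ph)
{
	struct device_node *eth_dn = of_find_node_by_phandle(ph); /* +1 ref */

	if (!eth_dn)
		return -ENODEV;

	if (setup_port(eth_dn)) {	/* hypothetical helper */
		of_node_put(eth_dn);	/* error path must drop the ref too */
		return -EIO;
	}

	of_node_put(eth_dn);		/* success path drops it as well */
	return 0;
}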
 
index 5ecbb1adcf3b9d45fa756af5682245933aa06eb3..3dfb2d131eb76f29c6129e8f29f477ab7be840db 100644 (file)
@@ -1885,6 +1885,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
         */
        adapter->state = VNIC_PROBED;
 
+       reinit_completion(&adapter->init_done);
        rc = init_crq_queue(adapter);
        if (rc) {
                netdev_err(adapter->netdev,
@@ -3761,6 +3762,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
 {
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
+       netdev_features_t old_hw_features = 0;
        union ibmvnic_crq crq;
        int i;
 
@@ -3836,24 +3838,41 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
        adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
        adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
 
-       adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;
+       if (adapter->state != VNIC_PROBING) {
+               old_hw_features = adapter->netdev->hw_features;
+               adapter->netdev->hw_features = 0;
+       }
+
+       adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
 
        if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
-               adapter->netdev->features |= NETIF_F_IP_CSUM;
+               adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
 
        if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
-               adapter->netdev->features |= NETIF_F_IPV6_CSUM;
+               adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
 
        if ((adapter->netdev->features &
            (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
-               adapter->netdev->features |= NETIF_F_RXCSUM;
+               adapter->netdev->hw_features |= NETIF_F_RXCSUM;
 
        if (buf->large_tx_ipv4)
-               adapter->netdev->features |= NETIF_F_TSO;
+               adapter->netdev->hw_features |= NETIF_F_TSO;
        if (buf->large_tx_ipv6)
-               adapter->netdev->features |= NETIF_F_TSO6;
+               adapter->netdev->hw_features |= NETIF_F_TSO6;
+
+       if (adapter->state == VNIC_PROBING) {
+               adapter->netdev->features |= adapter->netdev->hw_features;
+       } else if (old_hw_features != adapter->netdev->hw_features) {
+               netdev_features_t tmp = 0;
 
-       adapter->netdev->hw_features |= adapter->netdev->features;
+               /* disable features no longer supported */
+               adapter->netdev->features &= adapter->netdev->hw_features;
+               /* turn on features now supported if previously enabled */
+               tmp = (old_hw_features ^ adapter->netdev->hw_features) &
+                       adapter->netdev->hw_features;
+               adapter->netdev->features |=
+                               tmp & adapter->netdev->wanted_features;
+       }
 
        memset(&crq, 0, sizeof(crq));
        crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
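The hunk above stops clobbering netdev->features on every offload re-query and instead reconciles it: features lost from hw_features are dropped, and newly gained ones are enabled only if the user wanted them. A standalone arithmetic sketch with hypothetical flag values:

#include <stdio.h>

int main(void)
{
	unsigned long TSO = 1UL << 0, RXCSUM = 1UL << 1;	/* hypothetical bits */

	unsigned long old_hw   = TSO;			/* before re-query */
	unsigned long new_hw   = TSO | RXCSUM;		/* after re-query  */
	unsigned long wanted   = TSO | RXCSUM;		/* user's wishes   */
	unsigned long features = TSO;			/* currently on    */

	features &= new_hw;				/* drop what's gone */
	features |= ((old_hw ^ new_hw) & new_hw) & wanted; /* add what's new */

	printf("features=%#lx\n", features);	/* 0x3: TSO kept, RXCSUM turned on */
	return 0;
}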
@@ -4625,7 +4644,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
        old_num_rx_queues = adapter->req_rx_queues;
        old_num_tx_queues = adapter->req_tx_queues;
 
-       init_completion(&adapter->init_done);
+       reinit_completion(&adapter->init_done);
        adapter->init_done_rc = 0;
        ibmvnic_send_crq_init(adapter);
        if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4680,7 +4699,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
 
        adapter->from_passive_init = false;
 
-       init_completion(&adapter->init_done);
        adapter->init_done_rc = 0;
        ibmvnic_send_crq_init(adapter);
        if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4759,6 +4777,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
        INIT_LIST_HEAD(&adapter->rwi_list);
        spin_lock_init(&adapter->rwi_lock);
+       init_completion(&adapter->init_done);
        adapter->resetting = false;
 
        adapter->mac_change_pending = false;
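Moving init_completion() into probe and switching the reset paths to reinit_completion() leaves one initialize-once object that is merely re-armed before each use; re-running init_completion() while a waiter may still be queued re-initializes the wait queue out from under it. A minimal kernel-style sketch of the pattern (the container struct and timeout are assumptions):

#include <linux/completion.h>
#include <linux/errno.h>

struct foo {
	struct completion init_done;
};

static void foo_probe(struct foo *f)
{
	init_completion(&f->init_done);		/* exactly once, at probe */
}

static int foo_reinit(struct foo *f, unsigned long timeout)
{
	reinit_completion(&f->init_done);	/* re-arm, don't re-create */
	/* ... kick off the asynchronous operation here ... */
	if (!wait_for_completion_timeout(&f->init_done, timeout))
		return -ETIMEDOUT;
	return 0;
}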
index 5a0419421511fd7a9a3e44a1a7fa2c4e21f26217..ecef949f3baae022d46082f4a1dc32d18cb082ec 100644 (file)
@@ -41,6 +41,8 @@ static int __init fm10k_init_module(void)
        /* create driver workqueue */
        fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
                                          fm10k_driver_name);
+       if (!fm10k_workqueue)
+               return -ENOMEM;
 
        fm10k_dbg_init();
 
index d684998ba2b03b27230916afb7012951ad987a94..d3cc3427caad187ecf5bf967404f86182082790a 100644 (file)
@@ -790,6 +790,8 @@ struct i40e_vsi {
 
        /* VSI specific handlers */
        irqreturn_t (*irq_handler)(int irq, void *data);
+
+       unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
 } ____cacheline_internodealigned_in_smp;
 
 struct i40e_netdev_priv {
@@ -1096,20 +1098,6 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
        return !!vsi->xdp_prog;
 }
 
-static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
-{
-       bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
-       int qid = ring->queue_index;
-
-       if (ring_is_xdp(ring))
-               qid -= ring->vsi->alloc_queue_pairs;
-
-       if (!xdp_on)
-               return NULL;
-
-       return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
-}
-
 int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
index 4c885801fa2699432d0eaaaca8c4487081ee36b0..7874d0ec7fb0e1a5dc4c42aeed03d7342e8f4fef 100644 (file)
@@ -2573,8 +2573,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
                return -EOPNOTSUPP;
 
        /* only magic packet is supported */
-       if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)
-                         | (wol->wolopts != WAKE_FILTER))
+       if (wol->wolopts & ~WAKE_MAGIC)
                return -EOPNOTSUPP;
 
        /* is this a new value? */
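The deleted test mixed && with bitwise |; since wolopts cannot equal both WAKE_MAGIC and WAKE_FILTER at once, one of the two inequalities is always true and every nonzero request, including plain WAKE_MAGIC, was rejected. A standalone sketch reproducing the buggy precedence (flag values as in linux/ethtool.h):

#include <stdio.h>

#define WAKE_MAGIC  (1 << 5)
#define WAKE_FILTER (1 << 7)

int main(void)
{
	unsigned int wolopts = WAKE_MAGIC;	/* user asks for magic packet */

	/* old test, verbatim precedence: | binds tighter than && */
	int rej_old = wolopts && (wolopts != WAKE_MAGIC)
			       | (wolopts != WAKE_FILTER);
	/* new test: reject only bits outside WAKE_MAGIC */
	int rej_new = !!(wolopts & ~WAKE_MAGIC);

	printf("old rejects WAKE_MAGIC: %d, new: %d\n", rej_old, rej_new);
	return 0;
}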
index da62218eb70ad3f4c95111c3c67f3a4dd541aa50..b1c265012c8ad03d65a6eb56e1a0d5e42371c6be 100644 (file)
@@ -3063,6 +3063,26 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
                            ring->queue_index);
 }
 
+/**
+ * i40e_xsk_umem - Retrieve the AF_XDP ZC UMEM if XDP and ZC are enabled
+ * @ring: The Tx or Rx ring
+ *
+ * Returns the UMEM or NULL.
+ **/
+static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
+{
+       bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
+       int qid = ring->queue_index;
+
+       if (ring_is_xdp(ring))
+               qid -= ring->vsi->alloc_queue_pairs;
+
+       if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
+               return NULL;
+
+       return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
+}
+
 /**
  * i40e_configure_tx_ring - Configure a transmit ring context and the rest
  * @ring: The Tx ring to configure
@@ -10064,6 +10084,12 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
        hash_init(vsi->mac_filter_hash);
        vsi->irqs_ready = false;
 
+       if (type == I40E_VSI_MAIN) {
+               vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
+               if (!vsi->af_xdp_zc_qps)
+                       goto err_rings;
+       }
+
        ret = i40e_set_num_rings_in_vsi(vsi);
        if (ret)
                goto err_rings;
@@ -10082,6 +10108,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
        goto unlock_pf;
 
 err_rings:
+       bitmap_free(vsi->af_xdp_zc_qps);
        pf->next_vsi = i - 1;
        kfree(vsi);
 unlock_pf:
@@ -10162,6 +10189,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
        i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
        i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
 
+       bitmap_free(vsi->af_xdp_zc_qps);
        i40e_vsi_free_arrays(vsi, true);
        i40e_clear_rss_config_user(vsi);
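The af_xdp_zc_qps bitmap introduced above gives i40e_xsk_umem() an O(1) per-queue flag instead of inferring zero-copy state from the XDP program alone. A kernel-style sketch of the tracking pattern (all names here are illustrative, not the driver's):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

static unsigned long *zc_qps;

static int track_alloc(unsigned int nqps)
{
	zc_qps = bitmap_zalloc(nqps, GFP_KERNEL);	/* one bit per qp */
	return zc_qps ? 0 : -ENOMEM;
}

static void track_enable(u16 qid)  { set_bit(qid, zc_qps); }
static void track_disable(u16 qid) { clear_bit(qid, zc_qps); }
static bool track_enabled(u16 qid) { return test_bit(qid, zc_qps); }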
 
index 5fb4353c742b9038d3ac7f867163d35cd3c3142d..31575c0bb884f2b0520b848d29420dc98438eba4 100644 (file)
@@ -146,12 +146,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
        struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
-       struct timespec64 now;
+       struct timespec64 now, then;
 
+       then = ns_to_timespec64(delta);
        mutex_lock(&pf->tmreg_lock);
 
        i40e_ptp_read(pf, &now, NULL);
-       timespec64_add_ns(&now, delta);
+       now = timespec64_add(now, then);
        i40e_ptp_write(pf, (const struct timespec64 *)&now);
 
        mutex_unlock(&pf->tmreg_lock);
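timespec64_add_ns() takes its offset as u64, so the signed PTP delta went in as a huge positive value whenever user space stepped the clock backwards. Converting through ns_to_timespec64() keeps the sign. A kernel-style fragment with hypothetical values:

	struct timespec64 now = { .tv_sec = 100, .tv_nsec = 0 };
	s64 delta = -1000;				/* step back 1 us */
	struct timespec64 then = ns_to_timespec64(delta); /* ~{-1, 999999000} */

	now = timespec64_add(now, then);		/* -> {99, 999999000} */

	/* timespec64_add_ns(&now, delta) would have added (u64)-1000,
	 * roughly 584 years of nanoseconds, instead of subtracting 1 us */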
index b5c182e688e351eed227f867a8eaebaa6349bb07..1b17486543ac7e078a8723512943bab1c50d6d46 100644 (file)
@@ -102,6 +102,8 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
        if (err)
                return err;
 
+       set_bit(qid, vsi->af_xdp_zc_qps);
+
        if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
 
        if (if_running) {
@@ -148,6 +150,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
                        return err;
        }
 
+       clear_bit(qid, vsi->af_xdp_zc_qps);
        i40e_xsk_umem_dma_unmap(vsi, umem);
 
        if (if_running) {
index 01fcfc6f341519c1d8a67b5c8695ec9c04842b0c..d2e2c50ce257941491e4d0a96603808310357999 100644 (file)
 /* enable link status from external LINK_0 and LINK_1 pins */
 #define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
 #define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000  /* D3 WUC */
+#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
 #define E1000_CTRL_SDP0_DIR 0x00400000  /* SDP0 Data direction */
 #define E1000_CTRL_SDP1_DIR 0x00800000  /* SDP1 Data direction */
 #define E1000_CTRL_RST      0x04000000  /* Global reset */
index 69b230c53fed537464ba79b0be3ede75ab13f1da..3269d8e94744f61808893267e1e305122bedd3c5 100644 (file)
@@ -8740,9 +8740,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, rctl, status;
        u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
-#ifdef CONFIG_PM
-       int retval = 0;
-#endif
+       bool wake;
 
        rtnl_lock();
        netif_device_detach(netdev);
@@ -8755,14 +8753,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
        igb_clear_interrupt_scheme(adapter);
        rtnl_unlock();
 
-#ifdef CONFIG_PM
-       if (!runtime) {
-               retval = pci_save_state(pdev);
-               if (retval)
-                       return retval;
-       }
-#endif
-
        status = rd32(E1000_STATUS);
        if (status & E1000_STATUS_LU)
                wufc &= ~E1000_WUFC_LNKC;
@@ -8779,10 +8769,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
                }
 
                ctrl = rd32(E1000_CTRL);
-               /* advertise wake from D3Cold */
-               #define E1000_CTRL_ADVD3WUC 0x00100000
-               /* phy power management enable */
-               #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
                ctrl |= E1000_CTRL_ADVD3WUC;
                wr32(E1000_CTRL, ctrl);
 
@@ -8796,12 +8782,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
                wr32(E1000_WUFC, 0);
        }
 
-       *enable_wake = wufc || adapter->en_mng_pt;
-       if (!*enable_wake)
+       wake = wufc || adapter->en_mng_pt;
+       if (!wake)
                igb_power_down_link(adapter);
        else
                igb_power_up_link(adapter);
 
+       if (enable_wake)
+               *enable_wake = wake;
+
        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant.
         */
@@ -8844,22 +8833,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
 
 static int __maybe_unused igb_suspend(struct device *dev)
 {
-       int retval;
-       bool wake;
-       struct pci_dev *pdev = to_pci_dev(dev);
-
-       retval = __igb_shutdown(pdev, &wake, 0);
-       if (retval)
-               return retval;
-
-       if (wake) {
-               pci_prepare_to_sleep(pdev);
-       } else {
-               pci_wake_from_d3(pdev, false);
-               pci_set_power_state(pdev, PCI_D3hot);
-       }
-
-       return 0;
+       return __igb_shutdown(to_pci_dev(dev), NULL, 0);
 }
 
 static int __maybe_unused igb_resume(struct device *dev)
@@ -8930,22 +8904,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
 
 static int __maybe_unused igb_runtime_suspend(struct device *dev)
 {
-       struct pci_dev *pdev = to_pci_dev(dev);
-       int retval;
-       bool wake;
-
-       retval = __igb_shutdown(pdev, &wake, 1);
-       if (retval)
-               return retval;
-
-       if (wake) {
-               pci_prepare_to_sleep(pdev);
-       } else {
-               pci_wake_from_d3(pdev, false);
-               pci_set_power_state(pdev, PCI_D3hot);
-       }
-
-       return 0;
+       return __igb_shutdown(to_pci_dev(dev), NULL, 1);
 }
 
 static int __maybe_unused igb_runtime_resume(struct device *dev)
index cc4907f9ff02c3faecba89c524674f33581d2346..2fb97967961c43893b2d06fef0df9bc476dda426 100644 (file)
@@ -905,13 +905,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
        struct pci_dev *pdev = adapter->pdev;
        struct device *dev = &adapter->netdev->dev;
        struct mii_bus *bus;
+       int err = -ENODEV;
 
-       adapter->mii_bus = devm_mdiobus_alloc(dev);
-       if (!adapter->mii_bus)
+       bus = devm_mdiobus_alloc(dev);
+       if (!bus)
                return -ENOMEM;
 
-       bus = adapter->mii_bus;
-
        switch (hw->device_id) {
        /* C3000 SoCs */
        case IXGBE_DEV_ID_X550EM_A_KR:
@@ -949,12 +948,15 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
         */
        hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;
 
-       return mdiobus_register(bus);
+       err = mdiobus_register(bus);
+       if (!err) {
+               adapter->mii_bus = bus;
+               return 0;
+       }
 
 ixgbe_no_mii_bus:
        devm_mdiobus_free(dev, bus);
-       adapter->mii_bus = NULL;
-       return -ENODEV;
+       return err;
 }
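The reshuffle above is the usual publish-after-success discipline: configure the bus, register it, and only then expose it through adapter->mii_bus, so no other path ever sees an allocated-but-unregistered bus. A condensed sketch of the resulting flow (setup details elided):

	bus = devm_mdiobus_alloc(dev);
	if (!bus)
		return -ENOMEM;

	/* ... fill in bus->name, bus->read, bus->write, bus->parent ... */

	err = mdiobus_register(bus);
	if (err) {
		devm_mdiobus_free(dev, bus);
		return err;
	}
	adapter->mii_bus = bus;		/* publish only after success */
	return 0;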
 
 /**
index 71c65cc1790484b8e24e4d02ec973b830179c7de..d3eaf2ceaa3979b79a65d83beaaf072567d48c00 100644 (file)
@@ -858,6 +858,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs);
  * switching channels
  */
 typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
+int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
 int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
                               struct mlx5e_channels *new_chs,
                               mlx5e_fp_hw_modify hw_modify);
index 122927f3a6005b2be70c694ead0a6d4a139069c6..d5e5afbdca6dcbacb776571c97eef74e033cf356 100644 (file)
@@ -96,9 +96,6 @@ int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
        if (!eproto)
                return -EINVAL;
 
-       if (ext !=  MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet))
-               return -EOPNOTSUPP;
-
        err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port);
        if (err)
                return err;
index eac245a93f918c588dc8237e1af5996f3d0f73f0..4ab0d030b54486f67096a190471a747b1ac18c56 100644 (file)
@@ -122,7 +122,9 @@ out:
        return err;
 }
 
-/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */
+/* xoff = ((301 + 2.16 * len [m]) * speed [Gbps] + 2.72 * MTU [B])
+ * the minimum speed value used is 40Gbps
+ */
 static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
 {
        u32 speed;
@@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
        int err;
 
        err = mlx5e_port_linkspeed(priv->mdev, &speed);
-       if (err) {
-               mlx5_core_warn(priv->mdev, "cannot get port speed\n");
-               return 0;
-       }
+       if (err)
+               speed = SPEED_40000;
+       speed = max_t(u32, speed, SPEED_40000);
 
        xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
 
@@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
 }
 
 static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
-                                u32 xoff, unsigned int mtu)
+                                u32 xoff, unsigned int max_mtu)
 {
        int i;
 
@@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
                }
 
                if (port_buffer->buffer[i].size <
-                   (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
+                   (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
                        return -ENOMEM;
 
                port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
-               port_buffer->buffer[i].xon  = port_buffer->buffer[i].xoff - mtu;
+               port_buffer->buffer[i].xon  =
+                       port_buffer->buffer[i].xoff - max_mtu;
        }
 
        return 0;
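With the units the driver uses (cable_len in meters, speed in Mb/s so that the /1000 yields Gb/s, mtu in bytes), the integer form of the xoff formula can be checked standalone; the values below are hypothetical:

#include <stdio.h>

int main(void)
{
	unsigned int cable_len = 100;	/* meters */
	unsigned int speed = 40000;	/* Mb/s, the clamped 40G floor */
	unsigned int mtu = 9216;	/* MINIMUM_MAX_MTU above */

	unsigned int xoff = (301 + 216 * cable_len / 100) * speed / 1000
			    + 272 * mtu / 100;

	printf("xoff = %u bytes\n", xoff);	/* (301+216)*40 + 25067 = 45747 */
	return 0;
}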
@@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
 
 /**
  * update_buffer_lossy()
- *   mtu: device's MTU
+ *   max_mtu: netdev's max_mtu
  *   pfc_en: <input> current pfc configuration
  *   buffer: <input> current prio to buffer mapping
  *   xoff:   <input> xoff value
@@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
  *     Return 0 if no error.
  *     Set change to true if buffer configuration is modified.
  */
-static int update_buffer_lossy(unsigned int mtu,
+static int update_buffer_lossy(unsigned int max_mtu,
                               u8 pfc_en, u8 *buffer, u32 xoff,
                               struct mlx5e_port_buffer *port_buffer,
                               bool *change)
@@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu,
        }
 
        if (changed) {
-               err = update_xoff_threshold(port_buffer, xoff, mtu);
+               err = update_xoff_threshold(port_buffer, xoff, max_mtu);
                if (err)
                        return err;
 
@@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu,
        return 0;
 }
 
+#define MINIMUM_MAX_MTU 9216
 int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                                    u32 change, unsigned int mtu,
                                    struct ieee_pfc *pfc,
@@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
        bool update_prio2buffer = false;
        u8 buffer[MLX5E_MAX_PRIORITY];
        bool update_buffer = false;
+       unsigned int max_mtu;
        u32 total_used = 0;
        u8 curr_pfc_en;
        int err;
        int i;
 
        mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
+       max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
 
        err = mlx5e_port_query_buffer(priv, &port_buffer);
        if (err)
@@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 
        if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
                if (err)
                        return err;
        }
@@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                if (err)
                        return err;
 
-               err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff,
+               err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
                                          &port_buffer, &update_buffer);
                if (err)
                        return err;
@@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                if (err)
                        return err;
 
-               err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff,
-                                         &port_buffer, &update_buffer);
+               err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
+                                         xoff, &port_buffer, &update_buffer);
                if (err)
                        return err;
        }
@@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                        return -EINVAL;
 
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
                if (err)
                        return err;
        }
@@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
        /* Need to update buffer configuration if xoff value is changed */
        if (!update_buffer && xoff != priv->dcbx.xoff) {
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
                if (err)
                        return err;
        }
index 9d38e62cdf248a2f624b12227133f8132f7591bd..476dd97f7f2f25a4c0697a6ac2b34c0b5985034e 100644 (file)
@@ -186,12 +186,17 @@ static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx)
 
 static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv)
 {
-       int err;
+       int err = 0;
 
        rtnl_lock();
        mutex_lock(&priv->state_lock);
-       mlx5e_close_locked(priv->netdev);
-       err = mlx5e_open_locked(priv->netdev);
+
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+               goto out;
+
+       err = mlx5e_safe_reopen_channels(priv);
+
+out:
        mutex_unlock(&priv->state_lock);
        rtnl_unlock();
 
index fa2a3c444cdc604c308999f140a3125becd9c8d3..eec07b34b4ad07c627eebf36f87557296cc1701a 100644 (file)
@@ -39,6 +39,10 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
                        return -EOPNOTSUPP;
        }
 
+       if (!(mlx5e_eswitch_rep(*out_dev) &&
+             mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
+               return -EOPNOTSUPP;
+
        return 0;
 }
 
index 03b2a9f9c5895af92bcefad0b3525757aa0191c1..cad34d6f5f451b1bfdf4b363ef25cbe50cab9fcd 100644 (file)
 #include <linux/bpf_trace.h>
 #include "en/xdp.h"
 
+int mlx5e_xdp_max_mtu(struct mlx5e_params *params)
+{
+       int hr = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
+
+       /* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
+        * The condition checked in mlx5e_rx_is_linear_skb is:
+        *   SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE         (1)
+        *   (Note that hw_mtu == sw_mtu + hard_mtu.)
+        * What is returned from this function is:
+        *   max_mtu = PAGE_SIZE - S - hr - hard_mtu                         (2)
+        * After assigning sw_mtu := max_mtu, the left side of (1) turns to
+        * SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE,
+        * because both PAGE_SIZE and S are already aligned. Any number greater
+        * than max_mtu would make the left side of (1) greater than PAGE_SIZE,
+        * so max_mtu is the maximum MTU allowed.
+        */
+
+       return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr));
+}
+
 static inline bool
 mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
                    struct xdp_buff *xdp)
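Plugging representative numbers into the derivation above makes the bound concrete. All constants below are assumptions for a typical x86-64 build, not values taken from mlx5: 4 KiB pages, S = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) = 320, hr = NET_IP_ALIGN(0) + XDP_PACKET_HEADROOM(256), and a hard_mtu of 22 bytes (Ethernet header + VLAN + FCS):

#include <stdio.h>

int main(void)
{
	int page = 4096, s = 320, hr = 256, hard_mtu = 22;	/* assumed */
	int max_mtu = page - s - hr - hard_mtu;			/* formula (2) */

	printf("max XDP MTU = %d\n", max_mtu);			/* 3498 */
	return 0;
}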
@@ -304,9 +324,9 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
                                        mlx5e_xdpi_fifo_pop(xdpi_fifo);
 
                                if (is_redirect) {
-                                       xdp_return_frame(xdpi.xdpf);
                                        dma_unmap_single(sq->pdev, xdpi.dma_addr,
                                                         xdpi.xdpf->len, DMA_TO_DEVICE);
+                                       xdp_return_frame(xdpi.xdpf);
                                } else {
                                        /* Recycle RX page */
                                        mlx5e_page_release(rq, &xdpi.di, true);
@@ -345,9 +365,9 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
                                mlx5e_xdpi_fifo_pop(xdpi_fifo);
 
                        if (is_redirect) {
-                               xdp_return_frame(xdpi.xdpf);
                                dma_unmap_single(sq->pdev, xdpi.dma_addr,
                                                 xdpi.xdpf->len, DMA_TO_DEVICE);
+                               xdp_return_frame(xdpi.xdpf);
                        } else {
                                /* Recycle RX page */
                                mlx5e_page_release(rq, &xdpi.di, false);
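Both reorderings above fix the same use-after-free: dma_unmap_single() reads xdpi.xdpf->len, so it has to run before xdp_return_frame() hands the frame back to its allocator. The invariant, as a fragment:

	dma_unmap_single(sq->pdev, xdpi.dma_addr,
			 xdpi.xdpf->len, DMA_TO_DEVICE);	/* reads xdpf->len */
	xdp_return_frame(xdpi.xdpf);				/* may free xdpf */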
index ee27a7c8cd87d5121361f22344b53a21a7fb408b..553956cadc8a00d6bed384aa6cdad86759d5b2dd 100644 (file)
 
 #include "en.h"
 
-#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \
-                                MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM)))
 #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
 #define MLX5E_XDP_TX_EMPTY_DS_COUNT \
        (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
 #define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)
 
+int mlx5e_xdp_max_mtu(struct mlx5e_params *params);
 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
                      void *va, u16 *rx_headroom, u32 *len);
 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq);
index 3078491cc0d0678a6f1867373c752ed95496a59e..1539cf3de5dc97a180d7bdcb9fe2c5ec79db93c4 100644 (file)
@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
        if (err)
                return err;
 
+       mutex_lock(&mdev->mlx5e_res.td.list_lock);
        list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
+       mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 
        return 0;
 }
@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
 void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
                       struct mlx5e_tir *tir)
 {
+       mutex_lock(&mdev->mlx5e_res.td.list_lock);
        mlx5_core_destroy_tir(mdev, tir->tirn);
        list_del(&tir->list);
+       mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 }
 
 static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
        }
 
        INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
+       mutex_init(&mdev->mlx5e_res.td.list_lock);
 
        return 0;
 
@@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_tir *tir;
-       int err  = -ENOMEM;
+       int err  = 0;
        u32 tirn = 0;
        int inlen;
        void *in;
 
        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = kvzalloc(inlen, GFP_KERNEL);
-       if (!in)
+       if (!in) {
+               err = -ENOMEM;
                goto out;
+       }
 
        if (enable_uc_lb)
                MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
@@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
 
        MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
 
+       mutex_lock(&mdev->mlx5e_res.td.list_lock);
        list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
                tirn = tir->tirn;
                err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
@@ -168,6 +176,7 @@ out:
        kvfree(in);
        if (err)
                netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
+       mutex_unlock(&mdev->mlx5e_res.td.list_lock);
 
        return err;
 }
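The new td.list_lock serializes every traversal and mutation of tirs_list, so mlx5e_refresh_tirs() can no longer walk the list while another path adds or removes a TIR. The invariant in isolation, as a kernel-style sketch (names are stand-ins for the driver's fields):

#include <linux/list.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(list_lock);		/* stands in for td.list_lock */
static LIST_HEAD(tirs_list);

struct tir { struct list_head list; };

static void tir_add(struct tir *t)
{
	mutex_lock(&list_lock);
	list_add(&t->list, &tirs_list);
	mutex_unlock(&list_lock);
}

static void tir_del(struct tir *t)
{
	mutex_lock(&list_lock);
	list_del(&t->list);
	mutex_unlock(&list_lock);
}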
index a0987cc5fe4a12af0bf0155ad8f290153898518c..78dc8fe2a83c3499d290ceffcb273ebcd7ef1932 100644 (file)
@@ -603,16 +603,18 @@ static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
                          __ETHTOOL_LINK_MODE_MASK_NBITS);
 }
 
-static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev,
-                                   unsigned long *advertising_modes,
-                                   u32 eth_proto_cap)
+static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
+                                   u32 eth_proto_cap, bool ext)
 {
        unsigned long proto_cap = eth_proto_cap;
        struct ptys2ethtool_config *table;
        u32 max_size;
        int proto;
 
-       mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size);
+       table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
+       max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
+                        ARRAY_SIZE(ptys2legacy_ethtool_table);
+
        for_each_set_bit(proto, &proto_cap, max_size)
                bitmap_or(advertising_modes, advertising_modes,
                          table[proto].advertised,
@@ -794,12 +796,12 @@ static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
        ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
 }
 
-static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
-                           u8 tx_pause, u8 rx_pause,
-                           struct ethtool_link_ksettings *link_ksettings)
+static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause,
+                           struct ethtool_link_ksettings *link_ksettings,
+                           bool ext)
 {
        unsigned long *advertising = link_ksettings->link_modes.advertising;
-       ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap);
+       ptys2ethtool_adver_link(advertising, eth_proto_cap, ext);
 
        if (rx_pause)
                ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
@@ -854,8 +856,9 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
                               struct ethtool_link_ksettings *link_ksettings)
 {
        unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
+       bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
 
-       ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp);
+       ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
 }
 
 int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
@@ -872,6 +875,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
        u8 an_disable_admin;
        u8 an_status;
        u8 connector_type;
+       bool admin_ext;
        bool ext;
        int err;
 
@@ -886,6 +890,19 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
                                              eth_proto_capability);
        eth_proto_admin  = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
                                              eth_proto_admin);
+       /* The eth_proto_admin and ext_eth_proto_admin fields are
+        * mutually exclusive, so fall back to reading the legacy
+        * advertising field when the extended one is zero.
+        * admin_ext records how eth_proto_admin should be
+        * interpreted.
+        */
+       admin_ext = ext;
+       if (ext && !eth_proto_admin) {
+               eth_proto_admin  = MLX5_GET_ETH_PROTO(ptys_reg, out, false,
+                                                     eth_proto_admin);
+               admin_ext = false;
+       }
+
        eth_proto_oper   = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
                                              eth_proto_oper);
        eth_proto_lp        = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
@@ -899,7 +916,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
        ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
 
        get_supported(mdev, eth_proto_cap, link_ksettings);
-       get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings);
+       get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
+                       admin_ext);
        get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings);
 
        eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -997,19 +1015,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
 
 #define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1)
 
-       ext_requested = (link_ksettings->link_modes.advertising[0] >
-                       MLX5E_PTYS_EXT);
+       ext_requested = !!(link_ksettings->link_modes.advertising[0] >
+                       MLX5E_PTYS_EXT ||
+                       link_ksettings->link_modes.advertising[1]);
        ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
-
-       /*when ptys_extended_ethernet is set legacy link modes are deprecated */
-       if (ext_requested != ext_supported)
-               return -EPROTONOSUPPORT;
+       ext_requested &= ext_supported;
 
        speed = link_ksettings->base.speed;
        ethtool2ptys_adver_func = ext_requested ?
                                  mlx5e_ethtool2ptys_ext_adver_link :
                                  mlx5e_ethtool2ptys_adver_link;
-       err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto);
+       err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto);
        if (err) {
                netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
                           __func__, err);
@@ -1037,7 +1053,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
        if (!an_changes && link_modes == eproto.admin)
                goto out;
 
-       mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported);
+       mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested);
        mlx5_toggle_port_link(mdev);
 
 out:
@@ -1570,7 +1586,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
                break;
        case MLX5_MODULE_ID_SFP:
                modinfo->type       = ETH_MODULE_SFF_8472;
-               modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+               modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH;
                break;
        default:
                netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
@@ -1752,7 +1768,8 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
        struct mlx5e_channel *c;
        int i;
 
-       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state) ||
+           priv->channels.params.xdp_prog)
                return 0;
 
        for (i = 0; i < channels->num; i++) {
index b5fdbd3190d9fa99e6207c25e183354b1d5dad01..46157e2a1e5ac36121f8ec96f9f5a09417b5fa67 100644 (file)
@@ -951,7 +951,11 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
        if (params->rx_dim_enabled)
                __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
-       if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE))
+       /* We disable csum_complete when XDP is enabled since
+        * XDP programs might manipulate packets which will render
+        * skb->checksum incorrect.
+        */
+       if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
                __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
 
        return 0;
@@ -2937,6 +2941,14 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
        return 0;
 }
 
+int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
+{
+       struct mlx5e_channels new_channels = {};
+
+       new_channels.params = priv->channels.params;
+       return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+}
+
 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 {
        priv->tstamp.tx_type   = HWTSTAMP_TX_OFF;
@@ -3765,7 +3777,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
        if (params->xdp_prog &&
            !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
                netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
-                          new_mtu, MLX5E_XDP_MAX_MTU);
+                          new_mtu, mlx5e_xdp_max_mtu(params));
                err = -EINVAL;
                goto out;
        }
@@ -4161,11 +4173,10 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
        if (!report_failed)
                goto unlock;
 
-       mlx5e_close_locked(priv->netdev);
-       err = mlx5e_open_locked(priv->netdev);
+       err = mlx5e_safe_reopen_channels(priv);
        if (err)
                netdev_err(priv->netdev,
-                          "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+                          "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
                           err);
 
 unlock:
@@ -4201,7 +4212,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
 
        if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
                netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
-                           new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU);
+                           new_channels.params.sw_mtu,
+                           mlx5e_xdp_max_mtu(&new_channels.params));
                return -EINVAL;
        }
 
@@ -4553,7 +4565,7 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
 {
        enum mlx5e_traffic_types tt;
 
-       rss_params->hfunc = ETH_RSS_HASH_XOR;
+       rss_params->hfunc = ETH_RSS_HASH_TOP;
        netdev_rss_key_fill(rss_params->toeplitz_hash_key,
                            sizeof(rss_params->toeplitz_hash_key));
        mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
index 3dde5c7e0739afd6d04f874290d5a332c97f68cf..c3b3002ff62f073f8c9fff88ea2fb74693474619 100644 (file)
@@ -692,7 +692,14 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
 {
        *proto = ((struct ethhdr *)skb->data)->h_proto;
        *proto = __vlan_get_protocol(skb, *proto, network_depth);
-       return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6));
+
+       if (*proto == htons(ETH_P_IP))
+               return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
+
+       if (*proto == htons(ETH_P_IPV6))
+               return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
+
+       return false;
 }
 
 static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
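is_last_ethertype_ip() now also guarantees that the full L3 header sits in the skb's linear area before callers such as get_ip_proto() and the padding fixup below dereference it. The guard pattern in isolation, as a fragment:

	/* never touch skb->data + offset without pulling the bytes first */
	if (!pskb_may_pull(skb, network_depth + sizeof(struct iphdr)))
		return false;			/* header not available */

	iph = (struct iphdr *)(skb->data + network_depth);	/* now safe */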
@@ -712,17 +719,6 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
        rq->stats->ecn_mark += !!rc;
 }
 
-static u32 mlx5e_get_fcs(const struct sk_buff *skb)
-{
-       const void *fcs_bytes;
-       u32 _fcs_bytes;
-
-       fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
-                                      ETH_FCS_LEN, &_fcs_bytes);
-
-       return __get_unaligned_cpu32(fcs_bytes);
-}
-
 static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
 {
        void *ip_p = skb->data + network_depth;
@@ -733,6 +729,68 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
 
 #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
 
+#define MAX_PADDING 8
+
+static void
+tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
+                      struct mlx5e_rq_stats *stats)
+{
+       stats->csum_complete_tail_slow++;
+       skb->csum = csum_block_add(skb->csum,
+                                  skb_checksum(skb, offset, len, 0),
+                                  offset);
+}
+
+static void
+tail_padding_csum(struct sk_buff *skb, int offset,
+                 struct mlx5e_rq_stats *stats)
+{
+       u8 tail_padding[MAX_PADDING];
+       int len = skb->len - offset;
+       void *tail;
+
+       if (unlikely(len > MAX_PADDING)) {
+               tail_padding_csum_slow(skb, offset, len, stats);
+               return;
+       }
+
+       tail = skb_header_pointer(skb, offset, len, tail_padding);
+       if (unlikely(!tail)) {
+               tail_padding_csum_slow(skb, offset, len, stats);
+               return;
+       }
+
+       stats->csum_complete_tail++;
+       skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
+}
+
+static void
+mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto,
+                      struct mlx5e_rq_stats *stats)
+{
+       struct ipv6hdr *ip6;
+       struct iphdr   *ip4;
+       int pkt_len;
+
+       switch (proto) {
+       case htons(ETH_P_IP):
+               ip4 = (struct iphdr *)(skb->data + network_depth);
+               pkt_len = network_depth + ntohs(ip4->tot_len);
+               break;
+       case htons(ETH_P_IPV6):
+               ip6 = (struct ipv6hdr *)(skb->data + network_depth);
+               pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
+               break;
+       default:
+               return;
+       }
+
+       if (likely(pkt_len >= skb->len))
+               return;
+
+       tail_padding_csum(skb, pkt_len, stats);
+}
+
 static inline void mlx5e_handle_csum(struct net_device *netdev,
                                     struct mlx5_cqe64 *cqe,
                                     struct mlx5e_rq *rq,
@@ -752,7 +810,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                return;
        }
 
-       if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
+       /* True when explicitly set via priv flag, or XDP prog is loaded */
+       if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
                goto csum_unnecessary;
 
        /* CQE csum doesn't cover padding octets in short ethernet
@@ -780,18 +839,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                        skb->csum = csum_partial(skb->data + ETH_HLEN,
                                                 network_depth - ETH_HLEN,
                                                 skb->csum);
-               if (unlikely(netdev->features & NETIF_F_RXFCS))
-                       skb->csum = csum_block_add(skb->csum,
-                                                  (__force __wsum)mlx5e_get_fcs(skb),
-                                                  skb->len - ETH_FCS_LEN);
+
+               mlx5e_skb_padding_csum(skb, network_depth, proto, stats);
                stats->csum_complete++;
                return;
        }
 
 csum_unnecessary:
        if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
-                  ((cqe->hds_ip_ext & CQE_L4_OK) ||
-                   (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) {
+                  (cqe->hds_ip_ext & CQE_L4_OK))) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                if (cqe_is_tunneled(cqe)) {
                        skb->csum_level = 1;
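The helpers above reconcile CHECKSUM_COMPLETE with frames that carry tail padding: pkt_len is recomputed from the IP header, and the checksum of any bytes beyond it is folded into skb->csum, via csum_partial() on a small linear pad (at most MAX_PADDING bytes) or skb_checksum() otherwise. Collapsed to the slow path only, the fold is:

	int pad = skb->len - pkt_len;		/* tail padding, if any */

	if (pad > 0)
		skb->csum = csum_block_add(skb->csum,
					   skb_checksum(skb, pkt_len, pad, 0),
					   pkt_len);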
index 1a78e05cbba8168d919bfd45af3378becd3c9b68..b75aa8b8bf04eac8cac464c0c8550013154f6267 100644 (file)
@@ -59,6 +59,8 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
@@ -151,6 +153,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
                s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_csum_complete += rq_stats->csum_complete;
+               s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
+               s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
                s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
                s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                s->rx_xdp_drop     += rq_stats->xdp_drop;
@@ -1190,6 +1194,8 @@ static const struct counter_desc rq_stats_desc[] = {
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
index 4640d4f986f8c6495bc5c94cf22217fb59a64b34..16c3b785f282b109e9b2bc54bd4c136095be9b3f 100644 (file)
@@ -71,6 +71,8 @@ struct mlx5e_sw_stats {
        u64 rx_csum_unnecessary;
        u64 rx_csum_none;
        u64 rx_csum_complete;
+       u64 rx_csum_complete_tail;
+       u64 rx_csum_complete_tail_slow;
        u64 rx_csum_unnecessary_inner;
        u64 rx_xdp_drop;
        u64 rx_xdp_redirect;
@@ -181,6 +183,8 @@ struct mlx5e_rq_stats {
        u64 packets;
        u64 bytes;
        u64 csum_complete;
+       u64 csum_complete_tail;
+       u64 csum_complete_tail_slow;
        u64 csum_unnecessary;
        u64 csum_unnecessary_inner;
        u64 csum_none;
index b4967a0ff8c7ba6ce51826e4ae61964a52bfebb5..d75dc44eb2ff63482820cd4d317af4577fcc1fbd 100644 (file)
@@ -2158,6 +2158,52 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,
        return true;
 }
 
+struct ip_ttl_word {
+       __u8    ttl;
+       __u8    protocol;
+       __sum16 check;
+};
+
+struct ipv6_hoplimit_word {
+       __be16  payload_len;
+       __u8    nexthdr;
+       __u8    hop_limit;
+};
+
+static bool is_action_keys_supported(const struct flow_action_entry *act)
+{
+       u32 mask, offset;
+       u8 htype;
+
+       htype = act->mangle.htype;
+       offset = act->mangle.offset;
+       mask = ~act->mangle.mask;
+       /* For IPv4 & IPv6 header check 4 byte word,
+        * to determine that modified fields
+        * are NOT ttl & hop_limit only.
+        */
+       if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
+               struct ip_ttl_word *ttl_word =
+                       (struct ip_ttl_word *)&mask;
+
+               if (offset != offsetof(struct iphdr, ttl) ||
+                   ttl_word->protocol ||
+                   ttl_word->check) {
+                       return true;
+               }
+       } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+               struct ipv6_hoplimit_word *hoplimit_word =
+                       (struct ipv6_hoplimit_word *)&mask;
+
+               if (offset != offsetof(struct ipv6hdr, payload_len) ||
+                   hoplimit_word->payload_len ||
+                   hoplimit_word->nexthdr) {
+                       return true;
+               }
+       }
+       return false;
+}
+
 static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
                                          struct flow_action *flow_action,
                                          u32 actions,
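
The word-overlay trick in is_action_keys_supported() deserves a note. Pedit mangles arrive as 4-byte (offset, mask, value) triples, so the 32-bit word containing iphdr.ttl also covers protocol and check, and the word containing ipv6hdr.hop_limit also covers payload_len and nexthdr. Casting the inverted mask onto a struct with the matching layout turns "does this mangle touch anything other than ttl/hop_limit?" into a couple of named-field tests. Despite its name, the helper returns true when fields other than TTL/hop-limit are rewritten, which is what triggers the modify_ip_header restriction below; a TTL-only rewrite now passes. A fragment illustrating the IPv4 case, reusing the structs added above:

	u32 mask = ~act->mangle.mask;	/* set bits = bytes being changed */
	struct ip_ttl_word *ttl_word = (struct ip_ttl_word *)&mask;

	/* e.g. "tc ... action pedit munge ip ttl set 64" mangles the
	 * word at offsetof(struct iphdr, ttl) with only the ttl byte
	 * writable, so protocol and check read back as zero here and
	 * the rewrite is treated as TTL-only.
	 */
	if (act->mangle.offset == offsetof(struct iphdr, ttl) &&
	    !ttl_word->protocol && !ttl_word->check)
		; /* only ttl is modified */
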
@@ -2165,9 +2211,9 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
 {
        const struct flow_action_entry *act;
        bool modify_ip_header;
-       u8 htype, ip_proto;
        void *headers_v;
        u16 ethertype;
+       u8 ip_proto;
        int i;
 
        if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
@@ -2187,9 +2233,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
                    act->id != FLOW_ACTION_ADD)
                        continue;
 
-               htype = act->mangle.htype;
-               if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 ||
-                   htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+               if (is_action_keys_supported(act)) {
                        modify_ip_header = true;
                        break;
                }
@@ -2340,15 +2384,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
        return 0;
 }
 
-static inline int cmp_encap_info(struct ip_tunnel_key *a,
-                                struct ip_tunnel_key *b)
+struct encap_key {
+       struct ip_tunnel_key *ip_tun_key;
+       int tunnel_type;
+};
+
+static inline int cmp_encap_info(struct encap_key *a,
+                                struct encap_key *b)
 {
-       return memcmp(a, b, sizeof(*a));
+       return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
+              a->tunnel_type != b->tunnel_type;
 }
 
-static inline int hash_encap_info(struct ip_tunnel_key *key)
+static inline int hash_encap_info(struct encap_key *key)
 {
-       return jhash(key, sizeof(*key), 0);
+       return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
+                    key->tunnel_type);
 }
 
 
@@ -2379,7 +2430,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
        struct mlx5e_tc_flow_parse_attr *parse_attr;
        struct ip_tunnel_info *tun_info;
-       struct ip_tunnel_key *key;
+       struct encap_key key, e_key;
        struct mlx5e_encap_entry *e;
        unsigned short family;
        uintptr_t hash_key;
@@ -2389,13 +2440,16 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
        parse_attr = attr->parse_attr;
        tun_info = &parse_attr->tun_info[out_index];
        family = ip_tunnel_info_af(tun_info);
-       key = &tun_info->key;
+       key.ip_tun_key = &tun_info->key;
+       key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);
 
-       hash_key = hash_encap_info(key);
+       hash_key = hash_encap_info(&key);
 
        hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
                                   encap_hlist, hash_key) {
-               if (!cmp_encap_info(&e->tun_info.key, key)) {
+               e_key.ip_tun_key = &e->tun_info.key;
+               e_key.tunnel_type = e->tunnel_type;
+               if (!cmp_encap_info(&e_key, &key)) {
                        found = true;
                        break;
                }
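
Folding the tunnel type into the encap lookup fixes aliasing between tunnel kinds: two flows whose ip_tunnel_key bytes happen to match but which egress different tunnel device types (per mlx5e_tc_tun_get_type()) must not share an encap entry. Note the two halves: jhash() takes key->tunnel_type as its initval so the buckets separate, and cmp_encap_info() compares it explicitly so a hash collision still cannot match. A generic sketch of the same composite-key idiom, assuming linux/jhash.h (struct blob and the function names are placeholders):

	struct composite_key {
		struct blob *payload;	/* hashed by content */
		int kind;		/* discriminates payload type */
	};

	static u32 composite_hash(const struct composite_key *k)
	{
		/* the discriminator doubles as the jhash seed */
		return jhash(k->payload, sizeof(*k->payload), k->kind);
	}

	static bool composite_eq(const struct composite_key *a,
				 const struct composite_key *b)
	{
		return a->kind == b->kind &&
		       !memcmp(a->payload, b->payload, sizeof(*a->payload));
	}
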
@@ -2657,7 +2711,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 
        if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
            hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
-               err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
+               err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
                                            parse_attr, hdrs, extack);
                if (err)
                        return err;
index ecd2c747f7260306fd972478ecce71610918e3b3..8a67fd197b7923f67af1872eae280e0f5e3eb663 100644 (file)
@@ -105,8 +105,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
                 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
        MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
        MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
-       if (vport)
-               MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+       MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
        nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
                                     in, nic_vport_context);
 
@@ -134,8 +133,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
        MLX5_SET(modify_esw_vport_context_in, in, opcode,
                 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
        MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
-       if (vport)
-               MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+       MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
        return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }
 
@@ -431,6 +429,8 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
 {
        int err;
 
+       memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
+
        err = esw_create_legacy_vepa_table(esw);
        if (err)
                return err;
@@ -2157,6 +2157,7 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
 
        /* Star rule to forward all traffic to uplink vport */
        memset(spec, 0, sizeof(*spec));
+       memset(&dest, 0, sizeof(dest));
        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport.num = MLX5_VPORT_UPLINK;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
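
Both memset()s above fix uninitialized-state bugs: esw->fdb_table.legacy can hold stale pointers from a previous switch between legacy and offloads modes, and dest is a stack variable reused for several rules in this function, so fields set for an earlier rule (or plain stack garbage) would leak into the VEPA star rule. For a fresh local, a designated initializer achieves the same zeroing, as in this sketch:

	static int sketch_add_star_rule(struct mlx5_eswitch *esw)
	{
		/* zero everything, then set only what this rule needs */
		struct mlx5_flow_destination dest = {
			.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		};

		dest.vport.num = MLX5_VPORT_UPLINK;
		/* ... pass &dest to mlx5_add_flow_rules() ... */
		return 0;
	}
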
index f2260391be5b952478175de9be8235b3154c4ccf..9b2d78ee22b88333fcc0b043d4234c340dcb3c32 100644 (file)
@@ -1611,6 +1611,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
 {
        int err;
 
+       memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
        mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
 
        err = esw_create_offloads_fdb_tables(esw, nvports);
index 5cf5f2a9d51fec724f4fac709e29e40f4110d5f7..22a2ef11151441c3abcfc07e7a6e66e292563cae 100644 (file)
@@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
        return ret;
 }
 
-static void mlx5_fpga_tls_release_swid(struct idr *idr,
-                                      spinlock_t *idr_spinlock, u32 swid)
+static void *mlx5_fpga_tls_release_swid(struct idr *idr,
+                                       spinlock_t *idr_spinlock, u32 swid)
 {
        unsigned long flags;
+       void *ptr;
 
        spin_lock_irqsave(idr_spinlock, flags);
-       idr_remove(idr, swid);
+       ptr = idr_remove(idr, swid);
        spin_unlock_irqrestore(idr_spinlock, flags);
+       return ptr;
 }
 
 static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
@@ -165,20 +167,12 @@ static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
        kfree(buf);
 }
 
-struct mlx5_teardown_stream_context {
-       struct mlx5_fpga_tls_command_context cmd;
-       u32 swid;
-};
-
 static void
 mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
                                  struct mlx5_fpga_device *fdev,
                                  struct mlx5_fpga_tls_command_context *cmd,
                                  struct mlx5_fpga_dma_buf *resp)
 {
-       struct mlx5_teardown_stream_context *ctx =
-                   container_of(cmd, struct mlx5_teardown_stream_context, cmd);
-
        if (resp) {
                u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
 
@@ -186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
                        mlx5_fpga_err(fdev,
                                      "Teardown stream failed with syndrome = %d",
                                      syndrome);
-               else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
-                       mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
-                                                  &fdev->tls->tx_idr_spinlock,
-                                                  ctx->swid);
-               else
-                       mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
-                                                  &fdev->tls->rx_idr_spinlock,
-                                                  ctx->swid);
        }
        mlx5_fpga_tls_put_command_ctx(cmd);
 }
@@ -225,8 +211,14 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
 
        rcu_read_lock();
        flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
-       rcu_read_unlock();
+       if (unlikely(!flow)) {
+               rcu_read_unlock();
+               WARN_ONCE(1, "Received NULL pointer for handle\n");
+               kfree(buf);
+               return -EINVAL;
+       }
        mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+       rcu_read_unlock();
 
        MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
        MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
@@ -238,6 +230,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
        buf->complete = mlx_tls_kfree_complete;
 
        ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
+       if (ret < 0)
+               kfree(buf);
 
        return ret;
 }
@@ -245,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
 static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
                                            void *flow, u32 swid, gfp_t flags)
 {
-       struct mlx5_teardown_stream_context *ctx;
+       struct mlx5_fpga_tls_command_context *ctx;
        struct mlx5_fpga_dma_buf *buf;
        void *cmd;
 
@@ -253,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
        if (!ctx)
                return;
 
-       buf = &ctx->cmd.buf;
+       buf = &ctx->buf;
        cmd = (ctx + 1);
        MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
        MLX5_SET(tls_cmd, cmd, swid, swid);
@@ -264,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
        buf->sg[0].data = cmd;
        buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
 
-       ctx->swid = swid;
-       mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
+       mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
                               mlx5_fpga_tls_teardown_completion);
 }
 
@@ -275,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
        struct mlx5_fpga_tls *tls = mdev->fpga->tls;
        void *flow;
 
-       rcu_read_lock();
        if (direction_sx)
-               flow = idr_find(&tls->tx_idr, swid);
+               flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
+                                                 &tls->tx_idr_spinlock,
+                                                 swid);
        else
-               flow = idr_find(&tls->rx_idr, swid);
-
-       rcu_read_unlock();
+               flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
+                                                 &tls->rx_idr_spinlock,
+                                                 swid);
 
        if (!flow) {
                mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
@@ -289,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
                return;
        }
 
+       synchronize_rcu(); /* before kfree(flow) */
        mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
 }
 
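The teardown rework above closes two races. On the reader side, rcu_read_unlock() previously ran before the flow was dereferenced in mlx5_fpga_tls_flow_to_cmd(), so the object could be freed mid-use; the unlock now moves past the dereference, with a NULL check, and the buf leak on both that error path and a failed sendmsg is plugged with kfree(). On the writer side, del_flow switches from idr_find() under rcu_read_lock() to removing the entry outright, letting idr_remove()'s return value carry the pointer, with synchronize_rcu() before the object is handed to teardown. The resulting pattern, sketched on its own:

	/* Remove an RCU-looked-up object from an IDR and wait out all
	 * readers before the caller may free or recycle it.
	 */
	static void *sketch_remove_and_sync(struct idr *idr,
					    spinlock_t *lock, u32 id)
	{
		unsigned long flags;
		void *ptr;

		spin_lock_irqsave(lock, flags);
		ptr = idr_remove(idr, id);	/* returns the old entry */
		spin_unlock_irqrestore(lock, flags);

		if (ptr)
			synchronize_rcu();	/* idr_find() readers done */
		return ptr;
	}
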
index 70cc906a102b2dde87d161385126f43da4948266..76716419370df9738384675c3dc36e5d0ff6cf27 100644 (file)
@@ -164,26 +164,6 @@ static struct mlx5_profile profile[] = {
                        .size   = 8,
                        .limit  = 4
                },
-               .mr_cache[16]   = {
-                       .size   = 8,
-                       .limit  = 4
-               },
-               .mr_cache[17]   = {
-                       .size   = 8,
-                       .limit  = 4
-               },
-               .mr_cache[18]   = {
-                       .size   = 8,
-                       .limit  = 4
-               },
-               .mr_cache[19]   = {
-                       .size   = 4,
-                       .limit  = 2
-               },
-               .mr_cache[20]   = {
-                       .size   = 4,
-                       .limit  = 2
-               },
        },
 };
 
index 21b7f05b16a5f6053a88c1cdb9067c0f9e26ea10..361468e0435dcc9fbb667716e483c4104ebf7fea 100644 (file)
@@ -317,10 +317,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
                size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
 
        i2c_addr = MLX5_I2C_ADDR_LOW;
-       if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
-               i2c_addr = MLX5_I2C_ADDR_HIGH;
-               offset -= MLX5_EEPROM_PAGE_LENGTH;
-       }
 
        MLX5_SET(mcia_reg, in, l, 0);
        MLX5_SET(mcia_reg, in, module, module_num);
index 370ca94b677586728541bec1099acd2ef6dc227a..b8ba74de95558f84c29b26c80fb1ccb30889b83f 100644 (file)
@@ -40,6 +40,9 @@
 #include "mlx5_core.h"
 #include "lib/eq.h"
 
+static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
+                              struct mlx5_core_dct *dct);
+
 static struct mlx5_core_rsc_common *
 mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
 {
@@ -227,20 +230,49 @@ static void destroy_resource_common(struct mlx5_core_dev *dev,
        wait_for_completion(&qp->common.free);
 }
 
+static int _mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+                                 struct mlx5_core_dct *dct, bool need_cleanup)
+{
+       u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
+       u32 in[MLX5_ST_SZ_DW(destroy_dct_in)]   = {0};
+       struct mlx5_core_qp *qp = &dct->mqp;
+       int err;
+
+       err = mlx5_core_drain_dct(dev, dct);
+       if (err) {
+               if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+                       goto destroy;
+               } else {
+                       mlx5_core_warn(
+                               dev, "failed drain DCT 0x%x with error 0x%x\n",
+                               qp->qpn, err);
+                       return err;
+               }
+       }
+       wait_for_completion(&dct->drained);
+destroy:
+       if (need_cleanup)
+               destroy_resource_common(dev, &dct->mqp);
+       MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
+       MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
+       MLX5_SET(destroy_dct_in, in, uid, qp->uid);
+       err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
+                           (void *)&out, sizeof(out));
+       return err;
+}
+
 int mlx5_core_create_dct(struct mlx5_core_dev *dev,
                         struct mlx5_core_dct *dct,
-                        u32 *in, int inlen)
+                        u32 *in, int inlen,
+                        u32 *out, int outlen)
 {
-       u32 out[MLX5_ST_SZ_DW(create_dct_out)]   = {0};
-       u32 din[MLX5_ST_SZ_DW(destroy_dct_in)]   = {0};
-       u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
        struct mlx5_core_qp *qp = &dct->mqp;
        int err;
 
        init_completion(&dct->drained);
        MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
 
-       err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+       err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
        if (err) {
                mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
                return err;
@@ -254,11 +286,7 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev,
 
        return 0;
 err_cmd:
-       MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
-       MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
-       MLX5_SET(destroy_dct_in, din, uid, qp->uid);
-       mlx5_cmd_exec(dev, (void *)&in, sizeof(din),
-                     (void *)&out, sizeof(dout));
+       _mlx5_core_destroy_dct(dev, dct, false);
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
@@ -323,29 +351,7 @@ static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
 int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
                          struct mlx5_core_dct *dct)
 {
-       u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
-       u32 in[MLX5_ST_SZ_DW(destroy_dct_in)]   = {0};
-       struct mlx5_core_qp *qp = &dct->mqp;
-       int err;
-
-       err = mlx5_core_drain_dct(dev, dct);
-       if (err) {
-               if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
-                       goto destroy;
-               } else {
-                       mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n", qp->qpn, err);
-                       return err;
-               }
-       }
-       wait_for_completion(&dct->drained);
-destroy:
-       destroy_resource_common(dev, &dct->mqp);
-       MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
-       MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
-       MLX5_SET(destroy_dct_in, in, uid, qp->uid);
-       err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
-                           (void *)&out, sizeof(out));
-       return err;
+       return _mlx5_core_destroy_dct(dev, dct, true);
 }
 EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
 
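The DCT rework above is a straight factoring: the drain-then-destroy sequence existed twice, once inline in mlx5_core_destroy_dct() and once open-coded in the create-path unwind. The need_cleanup flag exists because on the create unwind destroy_resource_common() must be skipped, since the resource was never inserted into the tracking table. The old unwind also appears to pass the wrong buffer, which the shared helper fixes as a side effect:

	/* with "u32 *in" as the function parameter, the removed code's
	 * "&in" is the address of the pointer variable itself, not of
	 * the command payload the firmware expects:
	 */
	mlx5_cmd_exec(dev, (void *)&in, sizeof(din), ...);	/* wrong */
	mlx5_cmd_exec(dev, din, sizeof(din), ...);		/* intended */
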
index d23d53c0e2842bc0e28a179d3ef6457a3369cee7..f26a4ca293637b48ccc9aa8b9e55f375f767eedb 100644 (file)
@@ -568,7 +568,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
        if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
                return 0;
 
-       emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
+       emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
        if (!emad_wq)
                return -ENOMEM;
        mlxsw_core->emad_wq = emad_wq;
@@ -1958,10 +1958,10 @@ static int __init mlxsw_core_module_init(void)
 {
        int err;
 
-       mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
+       mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
        if (!mlxsw_wq)
                return -ENOMEM;
-       mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
+       mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
                                            mlxsw_core_driver_name);
        if (!mlxsw_owq) {
                err = -ENOMEM;
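
Dropping WQ_MEM_RECLAIM from all three mlxsw workqueues is about the flush-dependency rule, not performance: the flag pre-allocates a rescuer thread so the queue makes forward progress under memory pressure, but flushing work on a non-reclaim queue from a reclaim-tagged one trips check_flush_dependency() warnings, and mlxsw's work items are not on any memory-reclaim path to begin with. Sketch of the distinction (names illustrative):

	int sketch_init(void)
	{
		struct workqueue_struct *wq_io, *wq_ctl;

		/* on an I/O reclaim path (e.g. block drivers): rescuer
		 * thread guarantees forward progress under OOM */
		wq_io = alloc_workqueue("io_wq", WQ_MEM_RECLAIM, 0);

		/* control-plane work such as EMAD transactions: a plain
		 * queue keeps flush dependencies legal */
		wq_ctl = alloc_workqueue("ctl_wq", 0, 0);

		return (wq_io && wq_ctl) ? 0 : -ENOMEM;
	}
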
index 7a15e932ed2f5c8ddaee26ab078c943786cac421..c1c1965d7accabca443888932c30090564433d7c 100644 (file)
@@ -113,7 +113,7 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
                return 0;
        default:
                /* Do not consider thresholds for zero temperature. */
-               if (!MLXSW_REG_MTMP_TEMP_TO_MC(module_temp)) {
+               if (MLXSW_REG_MTMP_TEMP_TO_MC(module_temp) == 0) {
                        *temp = 0;
                        return 0;
                }
index ffee38e36ce8995348f776bbdbb8e4601b36b223..8648ca1712543abf8e6ef7bcabfae895c4009465 100644 (file)
@@ -27,7 +27,7 @@
 
 #define MLXSW_PCI_SW_RESET                     0xF0010
 #define MLXSW_PCI_SW_RESET_RST_BIT             BIT(0)
-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS       13000
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS       20000
 #define MLXSW_PCI_SW_RESET_WAIT_MSECS          100
 #define MLXSW_PCI_FW_READY                     0xA1844
 #define MLXSW_PCI_FW_READY_MASK                        0xFFFF
index 9eb63300c1d3a712d6377ea2835c5d64f2b32678..6b8aa3761899b03e7c9211b7d3272a1027a57371 100644 (file)
@@ -3126,11 +3126,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
        if (err)
                return err;
 
+       mlxsw_sp_port->link.autoneg = autoneg;
+
        if (!netif_running(dev))
                return 0;
 
-       mlxsw_sp_port->link.autoneg = autoneg;
-
        mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
        mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
 
@@ -3316,7 +3316,7 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
                err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
                                            MLXSW_REG_QEEC_HIERARCY_TC,
                                            i + 8, i,
-                                           false, 0);
+                                           true, 100);
                if (err)
                        return err;
        }
index 9a79b5e1159743a9b619407cd3b9b9af065c97ca..d633bef5f10512269547c00f718f552720dd29a3 100644 (file)
@@ -70,6 +70,7 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
        {MLXSW_REG_SBXX_DIR_EGRESS, 1},
        {MLXSW_REG_SBXX_DIR_EGRESS, 2},
        {MLXSW_REG_SBXX_DIR_EGRESS, 3},
+       {MLXSW_REG_SBXX_DIR_EGRESS, 15},
 };
 
 #define MLXSW_SP_SB_ING_TC_COUNT 8
@@ -428,6 +429,7 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+       MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI),
 };
 
 static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
@@ -517,14 +519,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
        MLXSW_SP_SB_CM(0, 7, 4),
        MLXSW_SP_SB_CM(0, 7, 4),
        MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
-       MLXSW_SP_SB_CM(0, 7, 4),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+       MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
        MLXSW_SP_SB_CM(1, 0xff, 4),
 };
 
@@ -671,6 +673,7 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
        MLXSW_SP_SB_PM(0, 0),
+       MLXSW_SP_SB_PM(10000, 90000),
 };
 
 static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
index 52fed8c7bf1edf61aa1c3b61c04f71e8dfc9717f..902e766a8ed33eabbe0b11075284878ba3fb3ad4 100644 (file)
@@ -6781,7 +6781,7 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
        /* A RIF is not created for macvlan netdevs. Their MAC is used to
         * populate the FDB
         */
-       if (netif_is_macvlan(dev))
+       if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
                return 0;
 
        for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
index f6ce386c30367f08a86153b8ac691a1480a1588a..50111f228d77228758d5e0ad634b1848712e11d4 100644 (file)
@@ -1630,7 +1630,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
        u16 fid_index;
        int err = 0;
 
-       if (switchdev_trans_ph_prepare(trans))
+       if (switchdev_trans_ph_commit(trans))
                return 0;
 
        bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
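
The prepare/commit flip above moves the actual MDB write into the switchdev prepare phase. In the two-phase transaction model, prepare may fail and its error propagates to the caller, while commit must not fail; doing the hardware write at prepare time therefore lets device errors be reported instead of silently dropped at commit. The shape after this change, sketched with hypothetical names (do_hw_mdb_add, struct sketch_port):

	static int sketch_port_mdb_add(struct sketch_port *port,
				       const struct switchdev_obj_port_mdb *mdb,
				       struct switchdev_trans *trans)
	{
		if (switchdev_trans_ph_commit(trans))
			return 0;		/* applied during prepare */

		return do_hw_mdb_add(port, mdb);	/* may fail */
	}
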
index bd6e9014bc74794b9a8a7e680f5b59ea7048382f..7849119d407aef1a7b92d6b0e047a8f74b4867f7 100644 (file)
@@ -142,6 +142,12 @@ struct ks8851_net {
 
 static int msg_enable;
 
+/* SPI frame opcodes */
+#define KS_SPIOP_RD    (0x00)
+#define KS_SPIOP_WR    (0x40)
+#define KS_SPIOP_RXFIFO        (0x80)
+#define KS_SPIOP_TXFIFO        (0xC0)
+
 /* shift for byte-enable data */
 #define BYTE_EN(_x)    ((_x) << 2)
 
@@ -535,9 +541,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
                /* set dma read address */
                ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00);
 
-               /* start the packet dma process, and set auto-dequeue rx */
-               ks8851_wrreg16(ks, KS_RXQCR,
-                              ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
+               /* start DMA access */
+               ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
 
                if (rxlen > 4) {
                        unsigned int rxalign;
@@ -568,7 +573,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
                        }
                }
 
-               ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+               /* end DMA access and dequeue packet */
+               ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_RRXEF);
        }
 }
 
@@ -785,6 +791,15 @@ static void ks8851_tx_work(struct work_struct *work)
 static int ks8851_net_open(struct net_device *dev)
 {
        struct ks8851_net *ks = netdev_priv(dev);
+       int ret;
+
+       ret = request_threaded_irq(dev->irq, NULL, ks8851_irq,
+                                  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+                                  dev->name, ks);
+       if (ret < 0) {
+               netdev_err(dev, "failed to get irq\n");
+               return ret;
+       }
 
        /* lock the card, even if we may not actually be doing anything
         * else at the moment */
@@ -849,6 +864,7 @@ static int ks8851_net_open(struct net_device *dev)
        netif_dbg(ks, ifup, ks->netdev, "network device up\n");
 
        mutex_unlock(&ks->lock);
+       mii_check_link(&ks->mii);
        return 0;
 }
 
@@ -899,6 +915,8 @@ static int ks8851_net_stop(struct net_device *dev)
                dev_kfree_skb(txb);
        }
 
+       free_irq(dev->irq, ks);
+
        return 0;
 }
 
@@ -1508,6 +1526,7 @@ static int ks8851_probe(struct spi_device *spi)
 
        spi_set_drvdata(spi, ks);
 
+       netif_carrier_off(ks->netdev);
        ndev->if_port = IF_PORT_100BASET;
        ndev->netdev_ops = &ks8851_netdev_ops;
        ndev->irq = spi->irq;
@@ -1529,14 +1548,6 @@ static int ks8851_probe(struct spi_device *spi)
        ks8851_read_selftest(ks);
        ks8851_init_mac(ks);
 
-       ret = request_threaded_irq(spi->irq, NULL, ks8851_irq,
-                                  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
-                                  ndev->name, ks);
-       if (ret < 0) {
-               dev_err(&spi->dev, "failed to get irq\n");
-               goto err_irq;
-       }
-
        ret = register_netdev(ndev);
        if (ret) {
                dev_err(&spi->dev, "failed to register network device\n");
@@ -1549,14 +1560,10 @@ static int ks8851_probe(struct spi_device *spi)
 
        return 0;
 
-
 err_netdev:
-       free_irq(ndev->irq, ks);
-
-err_irq:
+err_id:
        if (gpio_is_valid(gpio))
                gpio_set_value(gpio, 0);
-err_id:
        regulator_disable(ks->vdd_reg);
 err_reg:
        regulator_disable(ks->vdd_io);
@@ -1574,7 +1581,6 @@ static int ks8851_remove(struct spi_device *spi)
                dev_info(&spi->dev, "remove\n");
 
        unregister_netdev(priv->netdev);
-       free_irq(spi->irq, priv);
        if (gpio_is_valid(priv->gpio))
                gpio_set_value(priv->gpio, 0);
        regulator_disable(priv->vdd_reg);
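
Moving request_threaded_irq() from probe to ndo_open, with the matching free_irq() in ndo_stop, means the line is only claimed while the interface is up, the handler can no longer fire against a half-initialised device before register_netdev() completes, and /proc/interrupts shows the final netdev name rather than the pre-registration one. Probe additionally gains netif_carrier_off() before registration, paired with mii_check_link() at open, so carrier state is reported correctly from the start. The pairing, reduced to a skeleton (ks8851_irq is the driver's handler; the elided steps are hardware bring-up):

	static int sketch_open(struct net_device *dev)
	{
		struct ks8851_net *ks = netdev_priv(dev);
		int ret;

		/* level-triggered line, serviced in a thread */
		ret = request_threaded_irq(dev->irq, NULL, ks8851_irq,
					   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
					   dev->name, ks);
		if (ret < 0)
			return ret;
		/* ... bring the MAC up ... */
		return 0;
	}

	static int sketch_stop(struct net_device *dev)
	{
		/* ... quiesce the hardware first ... */
		free_irq(dev->irq, netdev_priv(dev));
		return 0;
	}
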
index 852256ef1f2233b9d60efa0b01da45e128514a14..23da1e3ee429af922c603b5a53e70820e398f38a 100644 (file)
 */
 
 #define KS_CCR                                 0x08
+#define CCR_LE                                 (1 << 10)   /* KSZ8851-16MLL */
 #define CCR_EEPROM                             (1 << 9)
-#define CCR_SPI                                        (1 << 8)
-#define CCR_32PIN                              (1 << 0)
+#define CCR_SPI                                        (1 << 8)    /* KSZ8851SNL    */
+#define CCR_8BIT                               (1 << 7)    /* KSZ8851-16MLL */
+#define CCR_16BIT                              (1 << 6)    /* KSZ8851-16MLL */
+#define CCR_32BIT                              (1 << 5)    /* KSZ8851-16MLL */
+#define CCR_SHARED                             (1 << 4)    /* KSZ8851-16MLL */
+#define CCR_48PIN                              (1 << 1)    /* KSZ8851-16MLL */
+#define CCR_32PIN                              (1 << 0)    /* KSZ8851SNL    */
 
 /* MAC address registers */
 #define KS_MAR(_m)                             (0x15 - (_m))
 #define RXCR1_RXE                              (1 << 0)
 
 #define KS_RXCR2                               0x76
-#define RXCR2_SRDBL_MASK                       (0x7 << 5)
-#define RXCR2_SRDBL_SHIFT                      (5)
-#define RXCR2_SRDBL_4B                         (0x0 << 5)
-#define RXCR2_SRDBL_8B                         (0x1 << 5)
-#define RXCR2_SRDBL_16B                                (0x2 << 5)
-#define RXCR2_SRDBL_32B                                (0x3 << 5)
-#define RXCR2_SRDBL_FRAME                      (0x4 << 5)
+#define RXCR2_SRDBL_MASK                       (0x7 << 5)  /* KSZ8851SNL    */
+#define RXCR2_SRDBL_SHIFT                      (5)         /* KSZ8851SNL    */
+#define RXCR2_SRDBL_4B                         (0x0 << 5)  /* KSZ8851SNL    */
+#define RXCR2_SRDBL_8B                         (0x1 << 5)  /* KSZ8851SNL    */
+#define RXCR2_SRDBL_16B                                (0x2 << 5)  /* KSZ8851SNL    */
+#define RXCR2_SRDBL_32B                                (0x3 << 5)  /* KSZ8851SNL    */
+#define RXCR2_SRDBL_FRAME                      (0x4 << 5)  /* KSZ8851SNL    */
 #define RXCR2_IUFFP                            (1 << 4)
 #define RXCR2_RXIUFCEZ                         (1 << 3)
 #define RXCR2_UDPLFE                           (1 << 2)
 #define RXFSHR_RXCE                            (1 << 0)
 
 #define KS_RXFHBCR                             0x7E
+#define RXFHBCR_CNT_MASK                       (0xfff << 0)
+
 #define KS_TXQCR                               0x80
-#define TXQCR_AETFE                            (1 << 2)
+#define TXQCR_AETFE                            (1 << 2)    /* KSZ8851SNL    */
 #define TXQCR_TXQMAM                           (1 << 1)
 #define TXQCR_METFE                            (1 << 0)
 
 
 #define KS_RXFDPR                              0x86
 #define RXFDPR_RXFPAI                          (1 << 14)
+#define RXFDPR_WST                             (1 << 12)   /* KSZ8851-16MLL */
+#define RXFDPR_EMS                             (1 << 11)   /* KSZ8851-16MLL */
+#define RXFDPR_RXFP_MASK                       (0x7ff << 0)
+#define RXFDPR_RXFP_SHIFT                      (0)
 
 #define KS_RXDTTR                              0x8C
 #define KS_RXDBCTR                             0x8E
 #define IRQ_RXMPDI                             (1 << 4)
 #define IRQ_LDI                                        (1 << 3)
 #define IRQ_EDI                                        (1 << 2)
-#define IRQ_SPIBEI                             (1 << 1)
+#define IRQ_SPIBEI                             (1 << 1)    /* KSZ8851SNL    */
 #define IRQ_DEDI                               (1 << 0)
 
 #define KS_RXFCTR                              0x9C
 #define KS_P1ANLPR                             0xEE
 
 #define KS_P1SCLMD                             0xF4
-#define P1SCLMD_LEDOFF                         (1 << 15)
-#define P1SCLMD_TXIDS                          (1 << 14)
-#define P1SCLMD_RESTARTAN                      (1 << 13)
-#define P1SCLMD_DISAUTOMDIX                    (1 << 10)
-#define P1SCLMD_FORCEMDIX                      (1 << 9)
-#define P1SCLMD_AUTONEGEN                      (1 << 7)
-#define P1SCLMD_FORCE100                       (1 << 6)
-#define P1SCLMD_FORCEFDX                       (1 << 5)
-#define P1SCLMD_ADV_FLOW                       (1 << 4)
-#define P1SCLMD_ADV_100BT_FDX                  (1 << 3)
-#define P1SCLMD_ADV_100BT_HDX                  (1 << 2)
-#define P1SCLMD_ADV_10BT_FDX                   (1 << 1)
-#define P1SCLMD_ADV_10BT_HDX                   (1 << 0)
 
 #define KS_P1CR                                        0xF6
-#define P1CR_HP_MDIX                           (1 << 15)
-#define P1CR_REV_POL                           (1 << 13)
-#define P1CR_OP_100M                           (1 << 10)
-#define P1CR_OP_FDX                            (1 << 9)
-#define P1CR_OP_MDI                            (1 << 7)
-#define P1CR_AN_DONE                           (1 << 6)
-#define P1CR_LINK_GOOD                         (1 << 5)
-#define P1CR_PNTR_FLOW                         (1 << 4)
-#define P1CR_PNTR_100BT_FDX                    (1 << 3)
-#define P1CR_PNTR_100BT_HDX                    (1 << 2)
-#define P1CR_PNTR_10BT_FDX                     (1 << 1)
-#define P1CR_PNTR_10BT_HDX                     (1 << 0)
+#define P1CR_LEDOFF                            (1 << 15)
+#define P1CR_TXIDS                             (1 << 14)
+#define P1CR_RESTARTAN                         (1 << 13)
+#define P1CR_DISAUTOMDIX                       (1 << 10)
+#define P1CR_FORCEMDIX                         (1 << 9)
+#define P1CR_AUTONEGEN                         (1 << 7)
+#define P1CR_FORCE100                          (1 << 6)
+#define P1CR_FORCEFDX                          (1 << 5)
+#define P1CR_ADV_FLOW                          (1 << 4)
+#define P1CR_ADV_100BT_FDX                     (1 << 3)
+#define P1CR_ADV_100BT_HDX                     (1 << 2)
+#define P1CR_ADV_10BT_FDX                      (1 << 1)
+#define P1CR_ADV_10BT_HDX                      (1 << 0)
+
+#define KS_P1SR                                        0xF8
+#define P1SR_HP_MDIX                           (1 << 15)
+#define P1SR_REV_POL                           (1 << 13)
+#define P1SR_OP_100M                           (1 << 10)
+#define P1SR_OP_FDX                            (1 << 9)
+#define P1SR_OP_MDI                            (1 << 7)
+#define P1SR_AN_DONE                           (1 << 6)
+#define P1SR_LINK_GOOD                         (1 << 5)
+#define P1SR_PNTR_FLOW                         (1 << 4)
+#define P1SR_PNTR_100BT_FDX                    (1 << 3)
+#define P1SR_PNTR_100BT_HDX                    (1 << 2)
+#define P1SR_PNTR_10BT_FDX                     (1 << 1)
+#define P1SR_PNTR_10BT_HDX                     (1 << 0)
 
 /* TX Frame control */
-
 #define TXFR_TXIC                              (1 << 15)
 #define TXFR_TXFID_MASK                                (0x3f << 0)
 #define TXFR_TXFID_SHIFT                       (0)
-
-/* SPI frame opcodes */
-#define KS_SPIOP_RD                            (0x00)
-#define KS_SPIOP_WR                            (0x40)
-#define KS_SPIOP_RXFIFO                                (0x80)
-#define KS_SPIOP_TXFIFO                                (0xC0)
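
Beyond tagging each define with the chip variant it applies to (the header is about to be shared with ks8851_mll, see the next file), this hunk untangles a mix-up in the register map: the bits previously attached to KS_P1CR are status bits, and the P1SCLMD_* set are the real port-control bits. After the rename, KS_P1CR (0xF6) carries control (P1CR_RESTARTAN, P1CR_ADV_*, ...) and the new KS_P1SR (0xF8) carries status (P1SR_LINK_GOOD, P1SR_AN_DONE, ...), so state checks read the right register. A sketch, assuming the SPI driver's register accessor:

	/* status from P1SR, control via P1CR */
	u16 p1sr = ks8851_rdreg16(ks, KS_P1SR);

	if (p1sr & P1SR_LINK_GOOD)
		netif_carrier_on(ks->netdev);
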
index 35f8c9ef204d91cd4c17591d84ebab597cff33b4..c946841c0a066d2e7eabd059092ed6cbbb156b01 100644 (file)
@@ -40,6 +40,8 @@
 #include <linux/of_device.h>
 #include <linux/of_net.h>
 
+#include "ks8851.h"
+
 #define        DRV_NAME        "ks8851_mll"
 
 static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
@@ -48,319 +50,10 @@ static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
 #define TX_BUF_SIZE                    2000
 #define RX_BUF_SIZE                    2000
 
-#define KS_CCR                         0x08
-#define CCR_EEPROM                     (1 << 9)
-#define CCR_SPI                                (1 << 8)
-#define CCR_8BIT                       (1 << 7)
-#define CCR_16BIT                      (1 << 6)
-#define CCR_32BIT                      (1 << 5)
-#define CCR_SHARED                     (1 << 4)
-#define CCR_32PIN                      (1 << 0)
-
-/* MAC address registers */
-#define KS_MARL                                0x10
-#define KS_MARM                                0x12
-#define KS_MARH                                0x14
-
-#define KS_OBCR                                0x20
-#define OBCR_ODS_16MA                  (1 << 6)
-
-#define KS_EEPCR                       0x22
-#define EEPCR_EESA                     (1 << 4)
-#define EEPCR_EESB                     (1 << 3)
-#define EEPCR_EEDO                     (1 << 2)
-#define EEPCR_EESCK                    (1 << 1)
-#define EEPCR_EECS                     (1 << 0)
-
-#define KS_MBIR                                0x24
-#define MBIR_TXMBF                     (1 << 12)
-#define MBIR_TXMBFA                    (1 << 11)
-#define MBIR_RXMBF                     (1 << 4)
-#define MBIR_RXMBFA                    (1 << 3)
-
-#define KS_GRR                         0x26
-#define GRR_QMU                                (1 << 1)
-#define GRR_GSR                                (1 << 0)
-
-#define KS_WFCR                                0x2A
-#define WFCR_MPRXE                     (1 << 7)
-#define WFCR_WF3E                      (1 << 3)
-#define WFCR_WF2E                      (1 << 2)
-#define WFCR_WF1E                      (1 << 1)
-#define WFCR_WF0E                      (1 << 0)
-
-#define KS_WF0CRC0                     0x30
-#define KS_WF0CRC1                     0x32
-#define KS_WF0BM0                      0x34
-#define KS_WF0BM1                      0x36
-#define KS_WF0BM2                      0x38
-#define KS_WF0BM3                      0x3A
-
-#define KS_WF1CRC0                     0x40
-#define KS_WF1CRC1                     0x42
-#define KS_WF1BM0                      0x44
-#define KS_WF1BM1                      0x46
-#define KS_WF1BM2                      0x48
-#define KS_WF1BM3                      0x4A
-
-#define KS_WF2CRC0                     0x50
-#define KS_WF2CRC1                     0x52
-#define KS_WF2BM0                      0x54
-#define KS_WF2BM1                      0x56
-#define KS_WF2BM2                      0x58
-#define KS_WF2BM3                      0x5A
-
-#define KS_WF3CRC0                     0x60
-#define KS_WF3CRC1                     0x62
-#define KS_WF3BM0                      0x64
-#define KS_WF3BM1                      0x66
-#define KS_WF3BM2                      0x68
-#define KS_WF3BM3                      0x6A
-
-#define KS_TXCR                                0x70
-#define TXCR_TCGICMP                   (1 << 8)
-#define TXCR_TCGUDP                    (1 << 7)
-#define TXCR_TCGTCP                    (1 << 6)
-#define TXCR_TCGIP                     (1 << 5)
-#define TXCR_FTXQ                      (1 << 4)
-#define TXCR_TXFCE                     (1 << 3)
-#define TXCR_TXPE                      (1 << 2)
-#define TXCR_TXCRC                     (1 << 1)
-#define TXCR_TXE                       (1 << 0)
-
-#define KS_TXSR                                0x72
-#define TXSR_TXLC                      (1 << 13)
-#define TXSR_TXMC                      (1 << 12)
-#define TXSR_TXFID_MASK                        (0x3f << 0)
-#define TXSR_TXFID_SHIFT               (0)
-#define TXSR_TXFID_GET(_v)             (((_v) >> 0) & 0x3f)
-
-
-#define KS_RXCR1                       0x74
-#define RXCR1_FRXQ                     (1 << 15)
-#define RXCR1_RXUDPFCC                 (1 << 14)
-#define RXCR1_RXTCPFCC                 (1 << 13)
-#define RXCR1_RXIPFCC                  (1 << 12)
-#define RXCR1_RXPAFMA                  (1 << 11)
-#define RXCR1_RXFCE                    (1 << 10)
-#define RXCR1_RXEFE                    (1 << 9)
-#define RXCR1_RXMAFMA                  (1 << 8)
-#define RXCR1_RXBE                     (1 << 7)
-#define RXCR1_RXME                     (1 << 6)
-#define RXCR1_RXUE                     (1 << 5)
-#define RXCR1_RXAE                     (1 << 4)
-#define RXCR1_RXINVF                   (1 << 1)
-#define RXCR1_RXE                      (1 << 0)
 #define RXCR1_FILTER_MASK              (RXCR1_RXINVF | RXCR1_RXAE | \
                                         RXCR1_RXMAFMA | RXCR1_RXPAFMA)
-
-#define KS_RXCR2                       0x76
-#define RXCR2_SRDBL_MASK               (0x7 << 5)
-#define RXCR2_SRDBL_SHIFT              (5)
-#define RXCR2_SRDBL_4B                 (0x0 << 5)
-#define RXCR2_SRDBL_8B                 (0x1 << 5)
-#define RXCR2_SRDBL_16B                        (0x2 << 5)
-#define RXCR2_SRDBL_32B                        (0x3 << 5)
-/* #define RXCR2_SRDBL_FRAME           (0x4 << 5) */
-#define RXCR2_IUFFP                    (1 << 4)
-#define RXCR2_RXIUFCEZ                 (1 << 3)
-#define RXCR2_UDPLFE                   (1 << 2)
-#define RXCR2_RXICMPFCC                        (1 << 1)
-#define RXCR2_RXSAF                    (1 << 0)
-
-#define KS_TXMIR                       0x78
-
-#define KS_RXFHSR                      0x7C
-#define RXFSHR_RXFV                    (1 << 15)
-#define RXFSHR_RXICMPFCS               (1 << 13)
-#define RXFSHR_RXIPFCS                 (1 << 12)
-#define RXFSHR_RXTCPFCS                        (1 << 11)
-#define RXFSHR_RXUDPFCS                        (1 << 10)
-#define RXFSHR_RXBF                    (1 << 7)
-#define RXFSHR_RXMF                    (1 << 6)
-#define RXFSHR_RXUF                    (1 << 5)
-#define RXFSHR_RXMR                    (1 << 4)
-#define RXFSHR_RXFT                    (1 << 3)
-#define RXFSHR_RXFTL                   (1 << 2)
-#define RXFSHR_RXRF                    (1 << 1)
-#define RXFSHR_RXCE                    (1 << 0)
-#define        RXFSHR_ERR                      (RXFSHR_RXCE | RXFSHR_RXRF |\
-                                       RXFSHR_RXFTL | RXFSHR_RXMR |\
-                                       RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
-                                       RXFSHR_RXTCPFCS)
-#define KS_RXFHBCR                     0x7E
-#define RXFHBCR_CNT_MASK               0x0FFF
-
-#define KS_TXQCR                       0x80
-#define TXQCR_AETFE                    (1 << 2)
-#define TXQCR_TXQMAM                   (1 << 1)
-#define TXQCR_METFE                    (1 << 0)
-
-#define KS_RXQCR                       0x82
-#define RXQCR_RXDTTS                   (1 << 12)
-#define RXQCR_RXDBCTS                  (1 << 11)
-#define RXQCR_RXFCTS                   (1 << 10)
-#define RXQCR_RXIPHTOE                 (1 << 9)
-#define RXQCR_RXDTTE                   (1 << 7)
-#define RXQCR_RXDBCTE                  (1 << 6)
-#define RXQCR_RXFCTE                   (1 << 5)
-#define RXQCR_ADRFE                    (1 << 4)
-#define RXQCR_SDA                      (1 << 3)
-#define RXQCR_RRXEF                    (1 << 0)
 #define RXQCR_CMD_CNTL                 (RXQCR_RXFCTE|RXQCR_ADRFE)
 
-#define KS_TXFDPR                      0x84
-#define TXFDPR_TXFPAI                  (1 << 14)
-#define TXFDPR_TXFP_MASK               (0x7ff << 0)
-#define TXFDPR_TXFP_SHIFT              (0)
-
-#define KS_RXFDPR                      0x86
-#define RXFDPR_RXFPAI                  (1 << 14)
-
-#define KS_RXDTTR                      0x8C
-#define KS_RXDBCTR                     0x8E
-
-#define KS_IER                         0x90
-#define KS_ISR                         0x92
-#define IRQ_LCI                                (1 << 15)
-#define IRQ_TXI                                (1 << 14)
-#define IRQ_RXI                                (1 << 13)
-#define IRQ_RXOI                       (1 << 11)
-#define IRQ_TXPSI                      (1 << 9)
-#define IRQ_RXPSI                      (1 << 8)
-#define IRQ_TXSAI                      (1 << 6)
-#define IRQ_RXWFDI                     (1 << 5)
-#define IRQ_RXMPDI                     (1 << 4)
-#define IRQ_LDI                                (1 << 3)
-#define IRQ_EDI                                (1 << 2)
-#define IRQ_SPIBEI                     (1 << 1)
-#define IRQ_DEDI                       (1 << 0)
-
-#define KS_RXFCTR                      0x9C
-#define RXFCTR_THRESHOLD_MASK          0x00FF
-
-#define KS_RXFC                                0x9D
-#define RXFCTR_RXFC_MASK               (0xff << 8)
-#define RXFCTR_RXFC_SHIFT              (8)
-#define RXFCTR_RXFC_GET(_v)            (((_v) >> 8) & 0xff)
-#define RXFCTR_RXFCT_MASK              (0xff << 0)
-#define RXFCTR_RXFCT_SHIFT             (0)
-
-#define KS_TXNTFSR                     0x9E
-
-#define KS_MAHTR0                      0xA0
-#define KS_MAHTR1                      0xA2
-#define KS_MAHTR2                      0xA4
-#define KS_MAHTR3                      0xA6
-
-#define KS_FCLWR                       0xB0
-#define KS_FCHWR                       0xB2
-#define KS_FCOWR                       0xB4
-
-#define KS_CIDER                       0xC0
-#define CIDER_ID                       0x8870
-#define CIDER_REV_MASK                 (0x7 << 1)
-#define CIDER_REV_SHIFT                        (1)
-#define CIDER_REV_GET(_v)              (((_v) >> 1) & 0x7)
-
-#define KS_CGCR                                0xC6
-#define KS_IACR                                0xC8
-#define IACR_RDEN                      (1 << 12)
-#define IACR_TSEL_MASK                 (0x3 << 10)
-#define IACR_TSEL_SHIFT                        (10)
-#define IACR_TSEL_MIB                  (0x3 << 10)
-#define IACR_ADDR_MASK                 (0x1f << 0)
-#define IACR_ADDR_SHIFT                        (0)
-
-#define KS_IADLR                       0xD0
-#define KS_IAHDR                       0xD2
-
-#define KS_PMECR                       0xD4
-#define PMECR_PME_DELAY                        (1 << 14)
-#define PMECR_PME_POL                  (1 << 12)
-#define PMECR_WOL_WAKEUP               (1 << 11)
-#define PMECR_WOL_MAGICPKT             (1 << 10)
-#define PMECR_WOL_LINKUP               (1 << 9)
-#define PMECR_WOL_ENERGY               (1 << 8)
-#define PMECR_AUTO_WAKE_EN             (1 << 7)
-#define PMECR_WAKEUP_NORMAL            (1 << 6)
-#define PMECR_WKEVT_MASK               (0xf << 2)
-#define PMECR_WKEVT_SHIFT              (2)
-#define PMECR_WKEVT_GET(_v)            (((_v) >> 2) & 0xf)
-#define PMECR_WKEVT_ENERGY             (0x1 << 2)
-#define PMECR_WKEVT_LINK               (0x2 << 2)
-#define PMECR_WKEVT_MAGICPKT           (0x4 << 2)
-#define PMECR_WKEVT_FRAME              (0x8 << 2)
-#define PMECR_PM_MASK                  (0x3 << 0)
-#define PMECR_PM_SHIFT                 (0)
-#define PMECR_PM_NORMAL                        (0x0 << 0)
-#define PMECR_PM_ENERGY                        (0x1 << 0)
-#define PMECR_PM_SOFTDOWN              (0x2 << 0)
-#define PMECR_PM_POWERSAVE             (0x3 << 0)
-
-/* Standard MII PHY data */
-#define KS_P1MBCR                      0xE4
-#define P1MBCR_FORCE_FDX               (1 << 8)
-
-#define KS_P1MBSR                      0xE6
-#define P1MBSR_AN_COMPLETE             (1 << 5)
-#define P1MBSR_AN_CAPABLE              (1 << 3)
-#define P1MBSR_LINK_UP                 (1 << 2)
-
-#define KS_PHY1ILR                     0xE8
-#define KS_PHY1IHR                     0xEA
-#define KS_P1ANAR                      0xEC
-#define KS_P1ANLPR                     0xEE
-
-#define KS_P1SCLMD                     0xF4
-#define P1SCLMD_LEDOFF                 (1 << 15)
-#define P1SCLMD_TXIDS                  (1 << 14)
-#define P1SCLMD_RESTARTAN              (1 << 13)
-#define P1SCLMD_DISAUTOMDIX            (1 << 10)
-#define P1SCLMD_FORCEMDIX              (1 << 9)
-#define P1SCLMD_AUTONEGEN              (1 << 7)
-#define P1SCLMD_FORCE100               (1 << 6)
-#define P1SCLMD_FORCEFDX               (1 << 5)
-#define P1SCLMD_ADV_FLOW               (1 << 4)
-#define P1SCLMD_ADV_100BT_FDX          (1 << 3)
-#define P1SCLMD_ADV_100BT_HDX          (1 << 2)
-#define P1SCLMD_ADV_10BT_FDX           (1 << 1)
-#define P1SCLMD_ADV_10BT_HDX           (1 << 0)
-
-#define KS_P1CR                                0xF6
-#define P1CR_HP_MDIX                   (1 << 15)
-#define P1CR_REV_POL                   (1 << 13)
-#define P1CR_OP_100M                   (1 << 10)
-#define P1CR_OP_FDX                    (1 << 9)
-#define P1CR_OP_MDI                    (1 << 7)
-#define P1CR_AN_DONE                   (1 << 6)
-#define P1CR_LINK_GOOD                 (1 << 5)
-#define P1CR_PNTR_FLOW                 (1 << 4)
-#define P1CR_PNTR_100BT_FDX            (1 << 3)
-#define P1CR_PNTR_100BT_HDX            (1 << 2)
-#define P1CR_PNTR_10BT_FDX             (1 << 1)
-#define P1CR_PNTR_10BT_HDX             (1 << 0)
-
-/* TX Frame control */
-
-#define TXFR_TXIC                      (1 << 15)
-#define TXFR_TXFID_MASK                        (0x3f << 0)
-#define TXFR_TXFID_SHIFT               (0)
-
-#define KS_P1SR                                0xF8
-#define P1SR_HP_MDIX                   (1 << 15)
-#define P1SR_REV_POL                   (1 << 13)
-#define P1SR_OP_100M                   (1 << 10)
-#define P1SR_OP_FDX                    (1 << 9)
-#define P1SR_OP_MDI                    (1 << 7)
-#define P1SR_AN_DONE                   (1 << 6)
-#define P1SR_LINK_GOOD                 (1 << 5)
-#define P1SR_PNTR_FLOW                 (1 << 4)
-#define P1SR_PNTR_100BT_FDX            (1 << 3)
-#define P1SR_PNTR_100BT_HDX            (1 << 2)
-#define P1SR_PNTR_10BT_FDX             (1 << 1)
-#define P1SR_PNTR_10BT_HDX             (1 << 0)
-
 #define        ENUM_BUS_NONE                   0
 #define        ENUM_BUS_8BIT                   1
 #define        ENUM_BUS_16BIT                  2
@@ -1475,7 +1168,7 @@ static void ks_setup(struct ks_net *ks)
        ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
 
        /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
-       ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);
+       ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_RXFCT_MASK);
 
        /* Setup RxQ Command Control (RXQCR) */
        ks->rc_rxqcr = RXQCR_CMD_CNTL;
@@ -1488,7 +1181,7 @@ static void ks_setup(struct ks_net *ks)
         */
 
        w = ks_rdreg16(ks, KS_P1MBCR);
-       w &= ~P1MBCR_FORCE_FDX;
+       w &= ~BMCR_FULLDPLX;
        ks_wrreg16(ks, KS_P1MBCR, w);
 
        w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
@@ -1629,7 +1322,7 @@ static int ks8851_probe(struct platform_device *pdev)
        ks_setup_int(ks);
 
        data = ks_rdreg16(ks, KS_OBCR);
-       ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
+       ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16mA);
 
        /* overwriting the default MAC address */
        if (pdev->dev.of_node) {
index a1d0d6e4253324f702f6eecae804fdd574b5f32e..d715ef4fc92fdb61a89122793133db147c1e4f59 100644 (file)
@@ -613,7 +613,7 @@ static int ocelot_mact_mc_add(struct ocelot_port *port,
                              struct netdev_hw_addr *hw_addr)
 {
        struct ocelot *ocelot = port->ocelot;
-       struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_KERNEL);
+       struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_ATOMIC);
 
        if (!ha)
                return -ENOMEM;
@@ -959,10 +959,8 @@ static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
                       ETH_GSTRING_LEN);
 }
 
-static void ocelot_check_stats(struct work_struct *work)
+static void ocelot_update_stats(struct ocelot *ocelot)
 {
-       struct delayed_work *del_work = to_delayed_work(work);
-       struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work);
        int i, j;
 
        mutex_lock(&ocelot->stats_lock);
@@ -986,11 +984,19 @@ static void ocelot_check_stats(struct work_struct *work)
                }
        }
 
-       cancel_delayed_work(&ocelot->stats_work);
+       mutex_unlock(&ocelot->stats_lock);
+}
+
+static void ocelot_check_stats_work(struct work_struct *work)
+{
+       struct delayed_work *del_work = to_delayed_work(work);
+       struct ocelot *ocelot = container_of(del_work, struct ocelot,
+                                            stats_work);
+
+       ocelot_update_stats(ocelot);
+
        queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
                           OCELOT_STATS_CHECK_DELAY);
-
-       mutex_unlock(&ocelot->stats_lock);
 }
 
 static void ocelot_get_ethtool_stats(struct net_device *dev,
@@ -1001,7 +1007,7 @@ static void ocelot_get_ethtool_stats(struct net_device *dev,
        int i;
 
        /* check and update now */
-       ocelot_check_stats(&ocelot->stats_work.work);
+       ocelot_update_stats(ocelot);
 
        /* Copy all counters */
        for (i = 0; i < ocelot->num_stats; i++)
@@ -1809,7 +1815,7 @@ int ocelot_init(struct ocelot *ocelot)
                                 ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
                                 ANA_CPUQ_8021_CFG, i);
 
-       INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats);
+       INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
        queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
                           OCELOT_STATS_CHECK_DELAY);
        return 0;
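
Two independent fixes in the ocelot hunks: the multicast entry copy in ocelot_mact_mc_add() is reached from ndo_set_rx_mode under the netdev address-list lock, where sleeping allocations are illegal, hence GFP_KERNEL becoming GFP_ATOMIC; and the periodic stats refresh is split so the ethtool path can update counters synchronously instead of cancelling and re-queuing the delayed work under the stats mutex. The self-rearming wrapper that results is the standard pattern:

	static void sketch_stats_work(struct work_struct *work)
	{
		struct delayed_work *dw = to_delayed_work(work);
		struct ocelot *ocelot = container_of(dw, struct ocelot,
						     stats_work);

		ocelot_update_stats(ocelot);	/* shared with ethtool */

		/* re-arm; queue_delayed_work() is a no-op if pending */
		queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
				   OCELOT_STATS_CHECK_DELAY);
	}
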
index 7cde387e5ec62a0c36f070a163a6e5b9c38a6a4b..51cd57ab3d9584d3d67508cc94aa6c9590aa11d1 100644 (file)
@@ -2366,6 +2366,7 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
                                dma_object->addr))) {
                        vxge_os_dma_free(devh->pdev, memblock,
                                &dma_object->acc_handle);
+                       memblock = NULL;
                        goto exit;
                }
 
index 9852080cf45483c49db22663c7f8caa1f7fe5e3b..ff391308566525cd613acc3a733130b41e7246a9 100644 (file)
@@ -39,7 +39,7 @@ nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode,
        }
        if (knode->sel->off || knode->sel->offshift || knode->sel->offmask ||
            knode->sel->offoff || knode->fshift) {
-               NL_SET_ERR_MSG_MOD(extack, "variable offseting not supported");
+               NL_SET_ERR_MSG_MOD(extack, "variable offsetting not supported");
                return false;
        }
        if (knode->sel->hoff || knode->sel->hmask) {
@@ -78,7 +78,7 @@ nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode,
 
        k = &knode->sel->keys[0];
        if (k->offmask) {
-               NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offseting not supported");
+               NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offsetting not supported");
                return false;
        }
        if (k->off) {
index eeda4ed98333afbf5671c1fd6d4ad69bc443a9e4..e336f6ee94f5c6d3a9108b85a6fb0529ecb7031d 100644 (file)
@@ -48,8 +48,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
 
        tmp_push_vlan_tci =
                FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
-               FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) |
-               NFP_FL_PUSH_VLAN_CFI;
+               FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
        push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
 }
 
index 4fcaf11ed56ed6f2ee89dde5313962676fba9f92..0ed51e79db00ebb90f0f9e8b2d9e74631c7ed4dc 100644 (file)
@@ -26,7 +26,7 @@
 #define NFP_FLOWER_LAYER2_GENEVE_OP    BIT(6)
 
 #define NFP_FLOWER_MASK_VLAN_PRIO      GENMASK(15, 13)
-#define NFP_FLOWER_MASK_VLAN_CFI       BIT(12)
+#define NFP_FLOWER_MASK_VLAN_PRESENT   BIT(12)
 #define NFP_FLOWER_MASK_VLAN_VID       GENMASK(11, 0)
 
 #define NFP_FLOWER_MASK_MPLS_LB                GENMASK(31, 12)
@@ -82,7 +82,6 @@
 #define NFP_FL_OUT_FLAGS_TYPE_IDX      GENMASK(2, 0)
 
 #define NFP_FL_PUSH_VLAN_PRIO          GENMASK(15, 13)
-#define NFP_FL_PUSH_VLAN_CFI           BIT(12)
 #define NFP_FL_PUSH_VLAN_VID           GENMASK(11, 0)
 
 #define IPV6_FLOW_LABEL_MASK           cpu_to_be32(0x000fffff)
index e03c8ef2c28c525b7fb44b4ce8f3e294d3d4fef6..9b8b843d0340374a6372501e2c69eb298563be75 100644 (file)
@@ -30,20 +30,19 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
 
                flow_rule_match_vlan(rule, &match);
                /* Populate the tci field. */
-               if (match.key->vlan_id || match.key->vlan_priority) {
-                       tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
-                                            match.key->vlan_priority) |
-                                 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
-                                            match.key->vlan_id) |
-                                 NFP_FLOWER_MASK_VLAN_CFI;
-                       ext->tci = cpu_to_be16(tmp_tci);
-                       tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
-                                            match.mask->vlan_priority) |
-                                 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
-                                            match.mask->vlan_id) |
-                                 NFP_FLOWER_MASK_VLAN_CFI;
-                       msk->tci = cpu_to_be16(tmp_tci);
-               }
+               tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+               tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+                                     match.key->vlan_priority) |
+                          FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+                                     match.key->vlan_id);
+               ext->tci = cpu_to_be16(tmp_tci);
+
+               tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+               tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+                                     match.mask->vlan_priority) |
+                          FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+                                     match.mask->vlan_id);
+               msk->tci = cpu_to_be16(tmp_tci);
        }
 }
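
The rework above sets NFP_FLOWER_MASK_VLAN_PRESENT unconditionally once a VLAN match exists, so a frame tagged with priority 0 and VLAN id 0 still produces a non-zero TCI. A standalone sketch of the packing, with simplified stand-ins for GENMASK()/FIELD_PREP() (the mask positions mirror the defines above; the helper names are made up):

#include <stdio.h>
#include <stdint.h>

#define VLAN_PRESENT    (1u << 12)            /* NFP_FLOWER_MASK_VLAN_PRESENT */
#define VLAN_PRIO(x)    (((x) & 0x7u) << 13)  /* GENMASK(15, 13) */
#define VLAN_VID(x)     ((x) & 0xfffu)        /* GENMASK(11, 0) */

static uint16_t pack_tci(unsigned int prio, unsigned int vid)
{
        /* The present bit is always set once the flow matched a VLAN,
         * even for prio 0 / vid 0 - that is the point of the fix. */
        return VLAN_PRESENT | VLAN_PRIO(prio) | VLAN_VID(vid);
}

int main(void)
{
        printf("prio=0 vid=0   -> tci=0x%04x\n", (unsigned)pack_tci(0, 0));
        printf("prio=5 vid=100 -> tci=0x%04x\n", (unsigned)pack_tci(5, 100));
        return 0;
}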
 
index d2c803bb4e562dd3805aaea86e689e5f682ce9ed..94d228c044963b8494c737aa1c46700d3598fd11 100644 (file)
@@ -195,7 +195,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
        ret = dev_queue_xmit(skb);
        nfp_repr_inc_tx_stats(netdev, len, ret);
 
-       return ret;
+       return NETDEV_TX_OK;
 }
 
 static int nfp_repr_stop(struct net_device *netdev)
@@ -383,7 +383,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
        netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
        netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
 
-       netdev->priv_flags |= IFF_NO_QUEUE;
+       netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
        netdev->features |= NETIF_F_LLTX;
 
        if (nfp_app_has_tc(app)) {
index 43a57ec296fd9c6e55ec667f34b6ddd98880e41f..127c89b22ef0da7d60680481467517f83ca7bc72 100644 (file)
@@ -431,12 +431,16 @@ struct qed_qm_info {
        u8 num_pf_rls;
 };
 
+#define QED_OVERFLOW_BIT       1
+
 struct qed_db_recovery_info {
        struct list_head list;
 
        /* Lock to protect the doorbell recovery mechanism list */
        spinlock_t lock;
+       bool dorq_attn;
        u32 db_recovery_counter;
+       unsigned long overflow;
 };
 
 struct storm_stats {
@@ -920,8 +924,7 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
 
 /* doorbell recovery mechanism */
 void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
-void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
-                            enum qed_db_rec_exec db_exec);
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
 bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
 
 /* Other Linux specific common definitions */
index 9df8c4b3b54e3dc71fdca5a759a9d98b303d8ac7..866cdc86a3f27c879d4364089597ea499a0b1714 100644 (file)
@@ -102,11 +102,15 @@ static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn,
 
 /* Doorbell address sanity (address within doorbell bar range) */
 static bool qed_db_rec_sanity(struct qed_dev *cdev,
-                             void __iomem *db_addr, void *db_data)
+                             void __iomem *db_addr,
+                             enum qed_db_rec_width db_width,
+                             void *db_data)
 {
+       u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64;
+
        /* Make sure doorbell address is within the doorbell bar */
        if (db_addr < cdev->doorbells ||
-           (u8 __iomem *)db_addr >
+           (u8 __iomem *)db_addr + width >
            (u8 __iomem *)cdev->doorbells + cdev->db_size) {
                WARN(true,
                     "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
@@ -159,7 +163,7 @@ int qed_db_recovery_add(struct qed_dev *cdev,
        }
 
        /* Sanitize doorbell address */
-       if (!qed_db_rec_sanity(cdev, db_addr, db_data))
+       if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data))
                return -EINVAL;
 
        /* Obtain hwfn from doorbell address */
@@ -205,10 +209,6 @@ int qed_db_recovery_del(struct qed_dev *cdev,
                return 0;
        }
 
-       /* Sanitize doorbell address */
-       if (!qed_db_rec_sanity(cdev, db_addr, db_data))
-               return -EINVAL;
-
        /* Obtain hwfn from doorbell address */
        p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);
 
@@ -300,31 +300,24 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
 
 /* Ring the doorbell of a single doorbell recovery entry */
 static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
-                                struct qed_db_recovery_entry *db_entry,
-                                enum qed_db_rec_exec db_exec)
-{
-       if (db_exec != DB_REC_ONCE) {
-               /* Print according to width */
-               if (db_entry->db_width == DB_REC_WIDTH_32B) {
-                       DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
-                                  "%s doorbell address %p data %x\n",
-                                  db_exec == DB_REC_DRY_RUN ?
-                                  "would have rung" : "ringing",
-                                  db_entry->db_addr,
-                                  *(u32 *)db_entry->db_data);
-               } else {
-                       DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
-                                  "%s doorbell address %p data %llx\n",
-                                  db_exec == DB_REC_DRY_RUN ?
-                                  "would have rung" : "ringing",
-                                  db_entry->db_addr,
-                                  *(u64 *)(db_entry->db_data));
-               }
+                                struct qed_db_recovery_entry *db_entry)
+{
+       /* Print according to width */
+       if (db_entry->db_width == DB_REC_WIDTH_32B) {
+               DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+                          "ringing doorbell address %p data %x\n",
+                          db_entry->db_addr,
+                          *(u32 *)db_entry->db_data);
+       } else {
+               DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+                          "ringing doorbell address %p data %llx\n",
+                          db_entry->db_addr,
+                          *(u64 *)(db_entry->db_data));
        }
 
        /* Sanity */
        if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr,
-                              db_entry->db_data))
+                              db_entry->db_width, db_entry->db_data))
                return;
 
        /* Flush the write combined buffer. Since there are multiple doorbelling
@@ -334,14 +327,12 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
        wmb();
 
        /* Ring the doorbell */
-       if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
-               if (db_entry->db_width == DB_REC_WIDTH_32B)
-                       DIRECT_REG_WR(db_entry->db_addr,
-                                     *(u32 *)(db_entry->db_data));
-               else
-                       DIRECT_REG_WR64(db_entry->db_addr,
-                                       *(u64 *)(db_entry->db_data));
-       }
+       if (db_entry->db_width == DB_REC_WIDTH_32B)
+               DIRECT_REG_WR(db_entry->db_addr,
+                             *(u32 *)(db_entry->db_data));
+       else
+               DIRECT_REG_WR64(db_entry->db_addr,
+                               *(u64 *)(db_entry->db_data));
 
        /* Flush the write combined buffer. Next doorbell may come from a
         * different entity to the same address...
@@ -350,29 +341,21 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
 }
 
 /* Traverse the doorbell recovery entry list and ring all the doorbells */
-void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
-                            enum qed_db_rec_exec db_exec)
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn)
 {
        struct qed_db_recovery_entry *db_entry = NULL;
 
-       if (db_exec != DB_REC_ONCE) {
-               DP_NOTICE(p_hwfn,
-                         "Executing doorbell recovery. Counter was %d\n",
-                         p_hwfn->db_recovery_info.db_recovery_counter);
+       DP_NOTICE(p_hwfn, "Executing doorbell recovery. Counter was %d\n",
+                 p_hwfn->db_recovery_info.db_recovery_counter);
 
-               /* Track amount of times recovery was executed */
-               p_hwfn->db_recovery_info.db_recovery_counter++;
-       }
+       /* Track the number of times recovery was executed */
+       p_hwfn->db_recovery_info.db_recovery_counter++;
 
        /* Protect the list */
        spin_lock_bh(&p_hwfn->db_recovery_info.lock);
        list_for_each_entry(db_entry,
-                           &p_hwfn->db_recovery_info.list, list_entry) {
-               qed_db_recovery_ring(p_hwfn, db_entry, db_exec);
-               if (db_exec == DB_REC_ONCE)
-                       break;
-       }
-
+                           &p_hwfn->db_recovery_info.list, list_entry)
+               qed_db_recovery_ring(p_hwfn, db_entry);
        spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
 }
 
index e23980e301b6a2be7f015d6a0c6f6aaadbf788b3..8848d5bed6e5c58a188900bf9ad5710529d66b51 100644 (file)
@@ -378,6 +378,9 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
        u32 count = QED_DB_REC_COUNT;
        u32 usage = 1;
 
+       /* Flush any pending (e)dpms as they may never arrive */
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
+
        /* wait for usage to zero or count to run out. This is necessary since
         * EDPM doorbell transactions can take multiple 64b cycles, and as such
         * can "split" over the pci. Possibly, the doorbell drop can happen with
@@ -406,51 +409,74 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
 
 int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       u32 overflow;
+       u32 attn_ovfl, cur_ovfl;
        int rc;
 
-       overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
-       DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow);
-       if (!overflow) {
-               qed_db_recovery_execute(p_hwfn, DB_REC_ONCE);
+       attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT,
+                                      &p_hwfn->db_recovery_info.overflow);
+       cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+       if (!cur_ovfl && !attn_ovfl)
                return 0;
-       }
 
-       if (qed_edpm_enabled(p_hwfn)) {
+       DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n",
+                 attn_ovfl, cur_ovfl);
+
+       if (cur_ovfl && !p_hwfn->db_bar_no_edpm) {
                rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
                if (rc)
                        return rc;
        }
 
-       /* Flush any pending (e)dpm as they may never arrive */
-       qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
-
        /* Release overflow sticky indication (stop silently dropping everything) */
        qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
 
        /* Repeat all last doorbells (doorbell drop recovery) */
-       qed_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
+       qed_db_recovery_execute(p_hwfn);
 
        return 0;
 }
 
-static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn)
 {
-       u32 int_sts, first_drop_reason, details, address, all_drops_reason;
        struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
+       u32 overflow;
        int rc;
 
-       int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
-       DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
+       overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+       if (!overflow)
+               goto out;
+
+       /* Run PF doorbell recovery in next periodic handler */
+       set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow);
+
+       if (!p_hwfn->db_bar_no_edpm) {
+               rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
+               if (rc)
+                       goto out;
+       }
+
+       qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
+out:
+       /* Schedule the handler even if overflow was not detected */
+       qed_periodic_db_rec_start(p_hwfn);
+}
+
+static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
+{
+       u32 int_sts, first_drop_reason, details, address, all_drops_reason;
+       struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
 
        /* int_sts may be zero since all PFs were interrupted for doorbell
         * overflow but another one already handled it. Can abort here: if
         * this PF also requires overflow recovery we will be interrupted again.
         * The masked almost full indication may also be set. Ignoring.
         */
+       int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
        if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
                return 0;
 
+       DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
+
        /* check if db_drop or overflow happened */
        if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
                       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
@@ -477,11 +503,6 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
                          GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
                          first_drop_reason, all_drops_reason);
 
-               rc = qed_db_rec_handler(p_hwfn, p_ptt);
-               qed_periodic_db_rec_start(p_hwfn);
-               if (rc)
-                       return rc;
-
                /* Clear the doorbell drop details and prepare for next drop */
                qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
 
@@ -507,6 +528,25 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
        return -EINVAL;
 }
 
+static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+{
+       p_hwfn->db_recovery_info.dorq_attn = true;
+       qed_dorq_attn_overflow(p_hwfn);
+
+       return qed_dorq_attn_int_sts(p_hwfn);
+}
+
+static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn)
+{
+       if (p_hwfn->db_recovery_info.dorq_attn)
+               goto out;
+
+       /* Call DORQ callback if the attention was missed */
+       qed_dorq_attn_cb(p_hwfn);
+out:
+       p_hwfn->db_recovery_info.dorq_attn = false;
+}
+
 /* Instead of major changes to the data-structure, we have some 'special'
  * identifiers for sources that changed meaning between adapters.
  */
@@ -1080,6 +1120,9 @@ static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
                }
        }
 
+       /* Handle missed DORQ attention */
+       qed_dorq_attn_handler(p_hwfn);
+
        /* Clear IGU indication for the deasserted bits */
        DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
                                    GTT_BAR0_MAP_REG_IGU_CMD +
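
The restructured attention path above splits the work: qed_dorq_attn_overflow() only latches the overflow into QED_OVERFLOW_BIT and schedules the periodic handler, while qed_db_rec_handler() later consumes the bit with test_and_clear_bit() and replays the doorbells. A userspace model of that producer/consumer hand-off using C11 atomics (the kernel uses set_bit()/test_and_clear_bit() on an unsigned long; everything below is a simplified stand-in):

#include <stdio.h>
#include <stdatomic.h>

static atomic_uint overflow_flags;
#define OVERFLOW_BIT 1u                 /* mirrors QED_OVERFLOW_BIT */

/* Attention (interrupt) context: cheap - just record the event. */
static void attn_handler(void)
{
        atomic_fetch_or(&overflow_flags, 1u << OVERFLOW_BIT);
}

/* Periodic context: consume the flag exactly once and recover. */
static void periodic_handler(void)
{
        unsigned int old = atomic_fetch_and(&overflow_flags,
                                            ~(1u << OVERFLOW_BIT));

        if (old & (1u << OVERFLOW_BIT))
                printf("running doorbell recovery\n");
        else
                printf("no overflow recorded, nothing to do\n");
}

int main(void)
{
        attn_handler();
        periodic_handler();     /* recovers */
        periodic_handler();     /* flag already consumed */
        return 0;
}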
index 1f356ed4f761e72486df4b57b1994e8f2dd89032..d473b522afc5137f69edece72c535397623ad05d 100644 (file)
@@ -192,8 +192,8 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
 
 /**
  * @brief - Doorbell Recovery handler.
- *          Run DB_REAL_DEAL doorbell recovery in case of PF overflow
- *          (and flush DORQ if needed), otherwise run DB_REC_ONCE.
+ *          Run doorbell recovery in case of PF overflow (and flush DORQ if
+ *          needed).
  *
  * @param p_hwfn
  * @param p_ptt
index f164d4acebcb43a4cd7b2858ad31e95e80467b74..6de23b56b2945c55118cbc3464a46881031583eb 100644 (file)
@@ -970,7 +970,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
        }
 }
 
-#define QED_PERIODIC_DB_REC_COUNT              100
+#define QED_PERIODIC_DB_REC_COUNT              10
 #define QED_PERIODIC_DB_REC_INTERVAL_MS                100
 #define QED_PERIODIC_DB_REC_INTERVAL \
        msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
index 9faaa6df78ed99b8b20b7f78b9efa9d4113b74e3..2f318aaf2b05d8145d4a0a4c45421fbff0bad455 100644 (file)
@@ -1591,7 +1591,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
                        p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
                } else {
                        DP_INFO(p_hwfn,
-                               "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
+                               "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
                                vf->abs_vf_id,
                                req->vfdev_info.eth_fp_hsi_major,
                                req->vfdev_info.eth_fp_hsi_minor,
index 5f3f42a25361679220fcc55224fcb2aa46adec03..bddb2b5982dcfedff2e8139741be978a1fc40e95 100644 (file)
@@ -490,18 +490,17 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
 
        ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
        if (IS_ERR(ptp->clock)) {
-               rc = -EINVAL;
                DP_ERR(edev, "PTP clock registration failed\n");
+               qede_ptp_disable(edev);
+               rc = -EINVAL;
                goto err2;
        }
 
        return 0;
 
-err2:
-       qede_ptp_disable(edev);
-       ptp->clock = NULL;
 err1:
        kfree(ptp);
+err2:
        edev->ptp = NULL;
 
        return rc;
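
The reordered labels above restore the usual kernel unwind convention: each goto target releases exactly the resources acquired before the failing step, in reverse order, so the clock-registration failure tears down what it set up without leaking or double-freeing. A reduced sketch of the ladder (resource names and error codes are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

/* Two-step setup with the classic unwind ladder: a failure at step N
 * jumps to the label that releases steps N-1..1 in reverse order. */
static int setup(int fail_step2, char **out)
{
        char *buf;
        int rc;

        buf = malloc(32);               /* step 1 */
        if (!buf)
                return -1;

        if (fail_step2) {               /* step 2 fails ... */
                rc = -2;
                goto err1;              /* ... so undo only step 1 */
        }

        *out = buf;
        return 0;

err1:
        free(buf);
        *out = NULL;
        return rc;
}

int main(void)
{
        char *p;
        int rc = setup(1, &p);

        printf("rc=%d p=%p\n", rc, (void *)p);
        return 0;
}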
index 0c443ea98479ac0971a6e36c28bd8bde2f080bfa..374a4d4371f99f23fe9d4507e0cbd33e177fbd85 100644 (file)
@@ -497,7 +497,7 @@ struct qlcnic_hardware_context {
        u16 board_type;
        u16 supported_type;
 
-       u16 link_speed;
+       u32 link_speed;
        u16 link_duplex;
        u16 link_autoneg;
        u16 module_type;
index 3b0adda7cc9c66769f84a1047a91aaa33c7939c8..a4cd6f2cfb862cb25315823d155b5497e59f5c2f 100644 (file)
@@ -1048,6 +1048,8 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
 
        for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
                skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
+               if (!skb)
+                       break;
                qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
                skb_put(skb, QLCNIC_ILB_PKT_SIZE);
                adapter->ahw->diag_cnt = 0;
index cfb67b7465958ec4eb6ae8aa68003a777052310b..58e0ca9093d3d9b4f08fe4bd2a8b3c3db23267a0 100644 (file)
@@ -482,7 +482,7 @@ static void hardware_init(struct net_device *dev)
        write_reg_high(ioaddr, IMR, ISRh_RxErr);
 
        lp->tx_unit_busy = 0;
-    lp->pac_cnt_in_tx_buf = 0;
+       lp->pac_cnt_in_tx_buf = 0;
        lp->saved_tx_size = 0;
 }
 
index c29dde0640784b57a687888c605fb2e52c1b5117..ed651dde6ef9ee8970bc68acb9c6fb9282411189 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/firmware.h>
 #include <linux/prefetch.h>
+#include <linux/pci-aspm.h>
 #include <linux/ipv6.h>
 #include <net/ip6_checksum.h>
 
@@ -678,6 +679,7 @@ struct rtl8169_private {
                struct work_struct work;
        } wk;
 
+       unsigned irq_enabled:1;
        unsigned supports_gmii:1;
        dma_addr_t counters_phys_addr;
        struct rtl8169_counters *counters;
@@ -1293,6 +1295,7 @@ static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
 static void rtl_irq_disable(struct rtl8169_private *tp)
 {
        RTL_W16(tp, IntrMask, 0);
+       tp->irq_enabled = 0;
 }
 
 #define RTL_EVENT_NAPI_RX      (RxOK | RxErr)
@@ -1301,6 +1304,7 @@ static void rtl_irq_disable(struct rtl8169_private *tp)
 
 static void rtl_irq_enable(struct rtl8169_private *tp)
 {
+       tp->irq_enabled = 1;
        RTL_W16(tp, IntrMask, tp->irq_mask);
 }
 
@@ -5457,7 +5461,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
        tp->cp_cmd |= PktCntrDisable | INTT_1;
        RTL_W16(tp, CPlusCmd, tp->cp_cmd);
 
-       RTL_W16(tp, IntrMitigate, 0x5151);
+       RTL_W16(tp, IntrMitigate, 0x5100);
 
        /* Work around for RxFIFO overflow. */
        if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
@@ -6520,9 +6524,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 {
        struct rtl8169_private *tp = dev_instance;
        u16 status = RTL_R16(tp, IntrStatus);
-       u16 irq_mask = RTL_R16(tp, IntrMask);
 
-       if (status == 0xffff || !(status & irq_mask))
+       if (!tp->irq_enabled || status == 0xffff || !(status & tp->irq_mask))
                return IRQ_NONE;
 
        if (unlikely(status & SYSErr)) {
@@ -6540,7 +6543,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
                set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
        }
 
-       if (status & RTL_EVENT_NAPI) {
+       if (status & (RTL_EVENT_NAPI | LinkChg)) {
                rtl_irq_disable(tp);
                napi_schedule_irqoff(&tp->napi);
        }
@@ -7350,6 +7353,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (rc)
                return rc;
 
+       /* Disable ASPM completely as it can cause random devices to stop
+        * working, as well as full system hangs, for some PCIe device users.
+        */
+       pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+
        /* enable device (incl. PCI PM wakeup and hotplug setup) */
        rc = pcim_enable_device(pdev);
        if (rc < 0) {
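
The new irq_enabled software flag lets the handler of a shared interrupt line reject events once the driver has masked its own sources, instead of trusting a fresh read of the (possibly all-ones) IntrMask register. A compact single-threaded model of that flag discipline (the struct and field names below are simplified):

#include <stdio.h>

struct nic {
        unsigned int irq_enabled : 1;
        unsigned short intr_mask;       /* models the IntrMask register */
};

static void irq_disable(struct nic *tp)
{
        tp->intr_mask = 0;              /* mask in hardware ... */
        tp->irq_enabled = 0;            /* ... and remember it in software */
}

static void irq_enable(struct nic *tp, unsigned short mask)
{
        tp->irq_enabled = 1;
        tp->intr_mask = mask;
}

/* Shared-IRQ handler: bail out early unless this device enabled IRQs. */
static int isr(struct nic *tp, unsigned short status)
{
        if (!tp->irq_enabled || status == 0xffff || !(status & tp->intr_mask))
                return 0;               /* IRQ_NONE */
        return 1;                       /* IRQ_HANDLED */
}

int main(void)
{
        struct nic tp = { 0 };

        irq_enable(&tp, 0x00ff);
        printf("enabled:  %d\n", isr(&tp, 0x0001));
        irq_disable(&tp);
        printf("disabled: %d\n", isr(&tp, 0x0001));
        return 0;
}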
index 6073387511f887e4b0cf069041a0b04b759572d1..67f9bb6e941b7ed2467dcfd78a49a0e6c2c8165d 100644 (file)
@@ -730,10 +730,10 @@ static u16 sis900_default_phy(struct net_device * net_dev)
                status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
 
                /* Link ON & Not select default PHY & not ghost PHY */
-                if ((status & MII_STAT_LINK) && !default_phy &&
-                                       (phy->phy_types != UNKNOWN))
-                       default_phy = phy;
-                else {
+               if ((status & MII_STAT_LINK) && !default_phy &&
+                   (phy->phy_types != UNKNOWN)) {
+                       default_phy = phy;
+               } else {
                        status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL);
                        mdio_write(net_dev, phy->phy_addr, MII_CONTROL,
                                status | MII_CNTL_AUTO | MII_CNTL_ISOLATE);
@@ -741,7 +741,7 @@ static u16 sis900_default_phy(struct net_device * net_dev)
                                phy_home = phy;
                        else if(phy->phy_types == LAN)
                                phy_lan = phy;
-                }
+               }
        }
 
        if (!default_phy && phy_home)
index a18149720aa2eadcd5ba9690bad3d568f5aeb812..cba5881b2746a36da1b710c5be14ed8ba5a7c080 100644 (file)
@@ -673,7 +673,8 @@ static void netsec_process_tx(struct netsec_priv *priv)
 }
 
 static void *netsec_alloc_rx_data(struct netsec_priv *priv,
-                                 dma_addr_t *dma_handle, u16 *desc_len)
+                                 dma_addr_t *dma_handle, u16 *desc_len,
+                                 bool napi)
 {
        size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        size_t payload_len = NETSEC_RX_BUF_SZ;
@@ -682,7 +683,7 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv,
 
        total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD);
 
-       buf = napi_alloc_frag(total_len);
+       buf = napi ? napi_alloc_frag(total_len) : netdev_alloc_frag(total_len);
        if (!buf)
                return NULL;
 
@@ -765,7 +766,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
                /* allocate a fresh buffer and map it to the hardware.
                 * This will eventually replace the old buffer in the hardware
                 */
-               buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
+               buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len,
+                                               true);
                if (unlikely(!buf_addr))
                        break;
 
@@ -1069,7 +1071,8 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv)
                void *buf;
                u16 len;
 
-               buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
+               buf = netsec_alloc_rx_data(priv, &dma_handle, &len,
+                                          false);
                if (!buf) {
                        netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
                        goto err_out;
index 40d6356a7e73c213f0d1d073387b8605bb4f3726..3dfb07a78952533420da7cb1cb6b87b171d91661 100644 (file)
 /* Specific functions used for Ring mode */
 
 /* Enhanced descriptors */
-static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
+                                          int bfsize)
 {
-       p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
-                       << ERDES1_BUFFER2_SIZE_SHIFT)
-                  & ERDES1_BUFFER2_SIZE_MASK);
+       if (bfsize == BUF_SIZE_16KiB)
+               p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
+                               << ERDES1_BUFFER2_SIZE_SHIFT)
+                          & ERDES1_BUFFER2_SIZE_MASK);
 
        if (end)
                p->des1 |= cpu_to_le32(ERDES1_END_RING);
@@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
 }
 
 /* Normal descriptors */
-static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
 {
-       p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
-                               << RDES1_BUFFER2_SIZE_SHIFT)
-                   & RDES1_BUFFER2_SIZE_MASK);
+       if (bfsize >= BUF_SIZE_2KiB) {
+               int bfsize2;
+
+               bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
+               p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
+                           & RDES1_BUFFER2_SIZE_MASK);
+       }
 
        if (end)
                p->des1 |= cpu_to_le32(RDES1_END_RING);
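
With the new bfsize argument, descriptor setup advertises only as much buffer as was actually allocated, split between the descriptor's two buffer fields: for normal descriptors, buffer 1 takes up to BUF_SIZE_2KiB - 1 bytes and buffer 2 takes the capped remainder. A worked example of the arithmetic (constants as in the hunks above):

#include <stdio.h>

#define BUF_SIZE_2KiB 2048

static void split(int bfsize)
{
        int bfsize1 = bfsize < BUF_SIZE_2KiB - 1 ? bfsize : BUF_SIZE_2KiB - 1;
        int bfsize2 = 0;

        if (bfsize >= BUF_SIZE_2KiB) {
                bfsize2 = bfsize - BUF_SIZE_2KiB + 1;
                if (bfsize2 > BUF_SIZE_2KiB - 1)
                        bfsize2 = BUF_SIZE_2KiB - 1;
        }
        printf("bfsize=%4d -> buf1=%4d buf2=%4d\n", bfsize, bfsize1, bfsize2);
}

int main(void)
{
        split(1536);    /* MTU-sized buffer: fits entirely in buffer 1 */
        split(2048);    /* exactly 2 KiB: 2047 + 1 */
        split(4096);    /* jumbo: 2047 + 2047, buffer 2 capped */
        return 0;
}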
index 062a600fa5a76310571f9aa6db84639c81546c09..21428537e231490db23f5ce539a95e15367ec6ed 100644 (file)
@@ -333,6 +333,9 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
         */
        dwmac->irq_pwr_wakeup = platform_get_irq_byname(pdev,
                                                        "stm32_pwr_wakeup");
+       if (dwmac->irq_pwr_wakeup == -EPROBE_DEFER)
+               return -EPROBE_DEFER;
+
        if (!dwmac->clk_eth_ck && dwmac->irq_pwr_wakeup >= 0) {
                err = device_init_wakeup(&pdev->dev, true);
                if (err) {
index 7fbb6a4dbf5107723f16b825e7a3b1577c97254a..e061e9f5fad71f065440605646da812d1b9daf51 100644 (file)
@@ -296,7 +296,7 @@ exit:
 }
 
 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-                                  int mode, int end)
+                                  int mode, int end, int bfsize)
 {
        dwmac4_set_rx_owner(p, disable_rx_ic);
 }
index 1d858fdec99718ec63a5fa1064fe9dd99670d2e0..98fa471da7c0f2764729f98c7044f52e068c1db9 100644 (file)
@@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
 }
 
 static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-                                 int mode, int end)
+                                 int mode, int end, int bfsize)
 {
        dwxgmac2_set_rx_owner(p, disable_rx_ic);
 }
index 5ef91a790f9d16fbd122f71e130cf7ecf5249a68..5202d6ad79194b0ed9134a7905d0aaa4309c6822 100644 (file)
@@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
        if (unlikely(rdes0 & RDES0_OWN))
                return dma_own;
 
+       if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
+               stats->rx_length_errors++;
+               return discard_frame;
+       }
+
        if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
                if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
                        x->rx_desc++;
@@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
         * It doesn't match with the information reported into the databook.
         * At any rate, we need to understand if the CSUM hw computation is ok
         * and report this info to the upper layers. */
-       ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
-                                !!(rdes0 & RDES0_FRAME_TYPE),
-                                !!(rdes0 & ERDES0_RX_MAC_ADDR));
+       if (likely(ret == good_frame))
+               ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
+                                        !!(rdes0 & RDES0_FRAME_TYPE),
+                                        !!(rdes0 & ERDES0_RX_MAC_ADDR));
 
        if (unlikely(rdes0 & RDES0_DRIBBLING))
                x->dribbling_bit++;
@@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 
 static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
-                                 int mode, int end)
+                                 int mode, int end, int bfsize)
 {
+       int bfsize1;
+
        p->des0 |= cpu_to_le32(RDES0_OWN);
-       p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
+
+       bfsize1 = min(bfsize, BUF_SIZE_8KiB);
+       p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
 
        if (mode == STMMAC_CHAIN_MODE)
                ehn_desc_rx_set_on_chain(p);
        else
-               ehn_desc_rx_set_on_ring(p, end);
+               ehn_desc_rx_set_on_ring(p, end, bfsize);
 
        if (disable_rx_ic)
                p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
index 92b8944f26e3c8566d9e68de4820d06231ca10bd..5bb00234d961c6a5a2385c90bc3fdf54ff96e4ca 100644 (file)
@@ -33,7 +33,7 @@ struct dma_extended_desc;
 struct stmmac_desc_ops {
        /* DMA RX descriptor ring initialization */
        void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
-                       int end);
+                       int end, int bfsize);
        /* DMA TX descriptor ring initialization */
        void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
        /* Invoked by the xmit function to prepare the tx descriptor */
index de65bb29feba967cc7a0d6ff3184998f359dde34..6d690678c20e11bf8594729524fafa238bb1c4bb 100644 (file)
@@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
                return dma_own;
 
        if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
-               pr_warn("%s: Oversized frame spanned multiple buffers\n",
-                       __func__);
                stats->rx_length_errors++;
                return discard_frame;
        }
@@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 }
 
 static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
-                              int end)
+                              int end, int bfsize)
 {
+       int bfsize1;
+
        p->des0 |= cpu_to_le32(RDES0_OWN);
-       p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
+
+       bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
+       p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
 
        if (mode == STMMAC_CHAIN_MODE)
                ndesc_rx_set_on_chain(p, end);
        else
-               ndesc_rx_set_on_ring(p, end);
+               ndesc_rx_set_on_ring(p, end, bfsize);
 
        if (disable_rx_ic)
                p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
index d8c5bc4122195d73f7150f2775797cc6ba9a3393..4d9bcb4d0378319d2d71d61a6e751ab9d141083d 100644 (file)
@@ -59,7 +59,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
                desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
                stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
-                               STMMAC_RING_MODE, 1, false, skb->len);
+                               STMMAC_RING_MODE, 0, false, skb->len);
                tx_q->tx_skbuff[entry] = NULL;
                entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
 
@@ -79,7 +79,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
                desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
                stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
-                               STMMAC_RING_MODE, 1, true, skb->len);
+                               STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb),
+                               skb->len);
        } else {
                des2 = dma_map_single(priv->device, skb->data,
                                      nopaged_len, DMA_TO_DEVICE);
@@ -91,7 +92,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
                tx_q->tx_skbuff_dma[entry].is_jumbo = true;
                desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
                stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
-                               STMMAC_RING_MODE, 1, true, skb->len);
+                               STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb),
+                               skb->len);
        }
 
        tx_q->cur_tx = entry;
@@ -111,10 +113,11 @@ static unsigned int is_jumbo_frm(int len, int enh_desc)
 
 static void refill_desc3(void *priv_ptr, struct dma_desc *p)
 {
-       struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+       struct stmmac_rx_queue *rx_q = priv_ptr;
+       struct stmmac_priv *priv = rx_q->priv_data;
 
        /* Fill DES3 in case of RING mode */
-       if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
+       if (priv->dma_buf_sz == BUF_SIZE_16KiB)
                p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
 }
 
index 97c5e1aad88f979208c80efe30d19cfcc5ba05e6..48712437d0da8039e74c8cc34d8063ac666c03a8 100644 (file)
@@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
                if (priv->extend_desc)
                        stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
                                        priv->use_riwt, priv->mode,
-                                       (i == DMA_RX_SIZE - 1));
+                                       (i == DMA_RX_SIZE - 1),
+                                       priv->dma_buf_sz);
                else
                        stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
                                        priv->use_riwt, priv->mode,
-                                       (i == DMA_RX_SIZE - 1));
+                                       (i == DMA_RX_SIZE - 1),
+                                       priv->dma_buf_sz);
 }
 
 /**
@@ -2614,8 +2616,6 @@ static int stmmac_open(struct net_device *dev)
        u32 chan;
        int ret;
 
-       stmmac_check_ether_addr(priv);
-
        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
            priv->hw->pcs != STMMAC_PCS_TBI &&
            priv->hw->pcs != STMMAC_PCS_RTBI) {
@@ -3216,14 +3216,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
                                csum_insertion, priv->mode, 1, last_segment,
                                skb->len);
-
-               /* The own bit must be the latest setting done when prepare the
-                * descriptor and then barrier is needed to make sure that
-                * all is coherent before granting the DMA engine.
-                */
-               wmb();
+       } else {
+               stmmac_set_tx_owner(priv, first);
        }
 
+       /* The own bit must be the last setting done when preparing the
+        * descriptor; a barrier is then needed to make sure everything is
+        * coherent before handing the descriptor to the DMA engine.
+        */
+       wmb();
+
        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
        stmmac_enable_dma_transmission(priv, priv->ioaddr);
@@ -3350,9 +3352,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 {
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        struct stmmac_channel *ch = &priv->channel[queue];
-       unsigned int entry = rx_q->cur_rx;
+       unsigned int next_entry = rx_q->cur_rx;
        int coe = priv->hw->rx_csum;
-       unsigned int next_entry;
        unsigned int count = 0;
        bool xmac;
 
@@ -3370,10 +3371,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
        }
        while (count < limit) {
-               int status;
+               int entry, status;
                struct dma_desc *p;
                struct dma_desc *np;
 
+               entry = next_entry;
+
                if (priv->extend_desc)
                        p = (struct dma_desc *)(rx_q->dma_erx + entry);
                else
@@ -3429,11 +3432,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                         *  ignored
                         */
                        if (frame_len > priv->dma_buf_sz) {
-                               netdev_err(priv->dev,
-                                          "len %d larger than size (%d)\n",
-                                          frame_len, priv->dma_buf_sz);
+                               if (net_ratelimit())
+                                       netdev_err(priv->dev,
+                                                  "len %d larger than size (%d)\n",
+                                                  frame_len, priv->dma_buf_sz);
                                priv->dev->stats.rx_length_errors++;
-                               break;
+                               continue;
                        }
 
                        /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
@@ -3468,7 +3472,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                                                dev_warn(priv->device,
                                                         "packet dropped\n");
                                        priv->dev->stats.rx_dropped++;
-                                       break;
+                                       continue;
                                }
 
                                dma_sync_single_for_cpu(priv->device,
@@ -3488,11 +3492,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                        } else {
                                skb = rx_q->rx_skbuff[entry];
                                if (unlikely(!skb)) {
-                                       netdev_err(priv->dev,
-                                                  "%s: Inconsistent Rx chain\n",
-                                                  priv->dev->name);
+                                       if (net_ratelimit())
+                                               netdev_err(priv->dev,
+                                                          "%s: Inconsistent Rx chain\n",
+                                                          priv->dev->name);
                                        priv->dev->stats.rx_dropped++;
-                                       break;
+                                       continue;
                                }
                                prefetch(skb->data - NET_IP_ALIGN);
                                rx_q->rx_skbuff[entry] = NULL;
@@ -3527,7 +3532,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                        priv->dev->stats.rx_packets++;
                        priv->dev->stats.rx_bytes += frame_len;
                }
-               entry = next_entry;
        }
 
        stmmac_rx_refill(priv, queue);
@@ -4297,6 +4301,8 @@ int stmmac_dvr_probe(struct device *device,
        if (ret)
                goto error_hw_init;
 
+       stmmac_check_ether_addr(priv);
+
        /* Configure real RX and TX queues */
        netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
        netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
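
The RX loop rework latches entry from next_entry at the top of each iteration and replaces break with continue on per-frame errors, so one bad descriptor is consumed and skipped instead of stalling the rest of the ring. A reduced model of that walk (the descriptor status is faked with a static array):

#include <stdio.h>

#define RING_SIZE 8

/* 0 = good frame, 1 = error frame that must be consumed, not fatal */
static const int ring[RING_SIZE] = { 0, 1, 0, 0, 1, 0, 0, 0 };

int main(void)
{
        int next_entry = 0, count = 0, limit = RING_SIZE;

        while (count < limit) {
                int entry = next_entry;         /* latch the current slot */

                next_entry = (entry + 1) % RING_SIZE;
                count++;

                if (ring[entry]) {
                        printf("entry %d: dropped\n", entry);
                        continue;               /* consume and move on */
                }
                printf("entry %d: delivered\n", entry);
        }
        return 0;
}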
index d819e8eaba1225dc5e9b188e42636721cc66a4c0..26db6aa002d1975adf3e07bbc3936a2d4c43ef87 100644 (file)
@@ -159,6 +159,12 @@ static const struct dmi_system_id quark_pci_dmi[] = {
                },
                .driver_data = (void *)&galileo_stmmac_dmi_data,
        },
+       /*
+        * There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
+        * The asset tag "6ES7647-0AA00-0YA2" is only for the IOT2020, which
+        * has only one PCI network device, while the other asset tags are
+        * for the IOT2040, which has two.
+        */
        {
                .matches = {
                        DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
@@ -170,8 +176,6 @@ static const struct dmi_system_id quark_pci_dmi[] = {
        {
                .matches = {
                        DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
-                       DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
-                                       "6ES7647-0AA00-1YA2"),
                },
                .driver_data = (void *)&iot2040_stmmac_dmi_data,
        },
index 5174d318901e0f74aa6fa6c7e12b29bfe2b362c7..0a920c5936b24e1a14a8625a63f9bf2eed019ccb 100644 (file)
@@ -3657,12 +3657,16 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
 
        ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
                                gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
-       if (ret)
+       if (ret) {
+               of_node_put(interfaces);
                return ret;
+       }
 
        ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
-       if (ret)
+       if (ret) {
+               of_node_put(interfaces);
                return ret;
+       }
 
        /* Create network interfaces */
        INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
index ec7e7ec24ff910f7db36f9da1bbcd8c04ccf949a..4041c75997ba5ed52803b69b4c2e19bf41dee8e8 100644 (file)
@@ -1575,12 +1575,14 @@ static int axienet_probe(struct platform_device *pdev)
        ret = of_address_to_resource(np, 0, &dmares);
        if (ret) {
                dev_err(&pdev->dev, "unable to get DMA resource\n");
+               of_node_put(np);
                goto free_netdev;
        }
        lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
        if (IS_ERR(lp->dma_regs)) {
                dev_err(&pdev->dev, "could not map DMA regs\n");
                ret = PTR_ERR(lp->dma_regs);
+               of_node_put(np);
                goto free_netdev;
        }
        lp->rx_irq = irq_of_parse_and_map(np, 1);
index e859ae2e42d5a152a567de048e898eeafa99fcb5..49f41b64077bb9877d74ea40bbddffea5fe835d4 100644 (file)
@@ -987,6 +987,7 @@ struct netvsc_device {
 
        wait_queue_head_t wait_drain;
        bool destroy;
+       bool tx_disable; /* if true, do not wake up queue again */
 
        /* Receive buffer allocated by us but manages by NetVSP */
        void *recv_buf;
index 813d195bbd57fed2ef96ea708b637ca8197be458..e0dce373cdd9d875ded78bff545d76b32920f69c 100644 (file)
@@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
 
        init_waitqueue_head(&net_device->wait_drain);
        net_device->destroy = false;
+       net_device->tx_disable = false;
 
        net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
        net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
        } else {
                struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
 
-               if (netif_tx_queue_stopped(txq) &&
+               if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
                    (hv_get_avail_to_write_percent(&channel->outbound) >
                     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
                        netif_tx_wake_queue(txq);
@@ -874,7 +875,8 @@ static inline int netvsc_send_pkt(
        } else if (ret == -EAGAIN) {
                netif_tx_stop_queue(txq);
                ndev_ctx->eth_stats.stop_queue++;
-               if (atomic_read(&nvchan->queue_sends) < 1) {
+               if (atomic_read(&nvchan->queue_sends) < 1 &&
+                   !net_device->tx_disable) {
                        netif_tx_wake_queue(txq);
                        ndev_ctx->eth_stats.wake_queue++;
                        ret = -ENOSPC;
index cf4897043e833618b5f5d6d452914ddbeff0e00d..b20fb0fb595bde3d7455e2c6214812fef6732017 100644 (file)
@@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
        rcu_read_unlock();
 }
 
+static void netvsc_tx_enable(struct netvsc_device *nvscdev,
+                            struct net_device *ndev)
+{
+       nvscdev->tx_disable = false;
+       virt_wmb(); /* ensure queue wake up mechanism is on */
+
+       netif_tx_wake_all_queues(ndev);
+}
+
 static int netvsc_open(struct net_device *net)
 {
        struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
        rdev = nvdev->extension;
        if (!rdev->link_state) {
                netif_carrier_on(net);
-               netif_tx_wake_all_queues(net);
+               netvsc_tx_enable(nvdev, net);
        }
 
        if (vf_netdev) {
@@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
        }
 }
 
+static void netvsc_tx_disable(struct netvsc_device *nvscdev,
+                             struct net_device *ndev)
+{
+       if (nvscdev) {
+               nvscdev->tx_disable = true;
+               virt_wmb(); /* ensure txq will not wake up after stop */
+       }
+
+       netif_tx_disable(ndev);
+}
+
 static int netvsc_close(struct net_device *net)
 {
        struct net_device_context *net_device_ctx = netdev_priv(net);
@@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
        struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
        int ret;
 
-       netif_tx_disable(net);
+       netvsc_tx_disable(nvdev, net);
 
        /* No need to close rndis filter if it is removed already */
        if (!nvdev)
@@ -920,7 +940,7 @@ static int netvsc_detach(struct net_device *ndev,
 
        /* If device was up (receiving) then shutdown */
        if (netif_running(ndev)) {
-               netif_tx_disable(ndev);
+               netvsc_tx_disable(nvdev, ndev);
 
                ret = rndis_filter_close(nvdev);
                if (ret) {
@@ -1908,7 +1928,7 @@ static void netvsc_link_change(struct work_struct *w)
                if (rdev->link_state) {
                        rdev->link_state = false;
                        netif_carrier_on(net);
-                       netif_tx_wake_all_queues(net);
+                       netvsc_tx_enable(net_device, net);
                } else {
                        notify = true;
                }
@@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w)
                if (!rdev->link_state) {
                        rdev->link_state = true;
                        netif_carrier_off(net);
-                       netif_tx_stop_all_queues(net);
+                       netvsc_tx_disable(net_device, net);
                }
                kfree(event);
                break;
@@ -1927,7 +1947,7 @@ static void netvsc_link_change(struct work_struct *w)
                if (!rdev->link_state) {
                        rdev->link_state = true;
                        netif_carrier_off(net);
-                       netif_tx_stop_all_queues(net);
+                       netvsc_tx_disable(net_device, net);
                        event->event = RNDIS_STATUS_MEDIA_CONNECT;
                        spin_lock_irqsave(&ndev_ctx->lock, flags);
                        list_add(&event->list, &ndev_ctx->reconfig_events);
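
The tx_disable flag is written before virt_wmb() on the disable path so that, once the completion path sees the queue stopped, it also sees the flag and refrains from waking the queue. A two-thread model using C11 release/acquire atomics in place of the kernel's virt_wmb() pairing (a simplification of the real ordering contract, not the driver code):

#include <stdio.h>
#include <stdatomic.h>
#include <pthread.h>

static atomic_bool tx_disable;
static atomic_bool queue_stopped;

/* Control path: mark TX disabled before stopping the queue
 * (the release store models the virt_wmb() in netvsc_tx_disable()). */
static void *disable_path(void *arg)
{
        (void)arg;
        atomic_store_explicit(&tx_disable, true, memory_order_release);
        atomic_store_explicit(&queue_stopped, true, memory_order_release);
        return NULL;
}

/* Completion path: only wake the queue if TX is not disabled. */
static void *completion_path(void *arg)
{
        (void)arg;
        if (atomic_load_explicit(&queue_stopped, memory_order_acquire) &&
            !atomic_load_explicit(&tx_disable, memory_order_acquire))
                printf("waking queue\n");
        else
                printf("leaving queue stopped\n");
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, disable_path, NULL);
        pthread_create(&b, NULL, completion_path, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}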
index cd1d8faccca5fb36b488312d734d5e42cebb7b1a..cd6b95e673a58319a2f0ea0ed15445cc1782435f 100644 (file)
@@ -1268,6 +1268,10 @@ static int adf7242_probe(struct spi_device *spi)
        INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work);
        lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev),
                                             WQ_MEM_RECLAIM);
+       if (unlikely(!lp->wqueue)) {
+               ret = -ENOMEM;
+               goto err_hw_init;
+       }
 
        ret = adf7242_hw_init(lp);
        if (ret)
index b6743f03dce000578b65bf9a8afddd3c2613d628..3b88846de31b18236e423c3b941f69537ab3bcfa 100644 (file)
@@ -324,7 +324,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
                        goto out_err;
                }
 
-               genlmsg_reply(skb, info);
+               res = genlmsg_reply(skb, info);
                break;
        }
 
index c589f5ae75bb552f53b39eed367594bc3d420165..8bb53ec8d9cf296f4735af57d50de364ce68ec9b 100644 (file)
@@ -533,6 +533,8 @@ mcr20a_start(struct ieee802154_hw *hw)
        dev_dbg(printdev(lp), "no slotted operation\n");
        ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
                                 DAR_PHY_CTRL1_SLOTTED, 0x0);
+       if (ret < 0)
+               return ret;
 
        /* enable irq */
        enable_irq(lp->spi->irq);
@@ -540,11 +542,15 @@ mcr20a_start(struct ieee802154_hw *hw)
        /* Unmask SEQ interrupt */
        ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2,
                                 DAR_PHY_CTRL2_SEQMSK, 0x0);
+       if (ret < 0)
+               return ret;
 
        /* Start the RX sequence */
        dev_dbg(printdev(lp), "start the RX sequence\n");
        ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
                                 DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);
+       if (ret < 0)
+               return ret;
 
        return 0;
 }
index 071869db44cf3e0b33cc75b3b4fd285212bf1c9d..520657945b8279debe7583d33c31c095899ddb7a 100644 (file)
@@ -7,6 +7,8 @@ menuconfig MDIO_DEVICE
        help
          MDIO devices and driver infrastructure code.
 
+if MDIO_DEVICE
+
 config MDIO_BUS
        tristate
        default m if PHYLIB=m
@@ -179,6 +181,7 @@ config MDIO_XGENE
          APM X-Gene SoC's.
 
 endif
+endif
 
 config PHYLINK
        tristate
index 9605d4fe540b1e4ed894d4ff5ab4f40e5d9caf16..cb86a3e90c7de3ff41a7d821c135aec8dad9eef0 100644 (file)
@@ -323,6 +323,19 @@ static int bcm54xx_config_init(struct phy_device *phydev)
 
        bcm54xx_phydsp_config(phydev);
 
+       /* Encode link speed into LED1 and LED3 pair (green/amber).
+        * Also flash these two LEDs on activity. This means configuring
+        * them for MULTICOLOR and encoding link/activity into them.
+        */
+       val = BCM5482_SHD_LEDS1_LED1(BCM_LED_SRC_MULTICOLOR1) |
+               BCM5482_SHD_LEDS1_LED3(BCM_LED_SRC_MULTICOLOR1);
+       bcm_phy_write_shadow(phydev, BCM5482_SHD_LEDS1, val);
+
+       val = BCM_LED_MULTICOLOR_IN_PHASE |
+               BCM5482_SHD_LEDS1_LED1(BCM_LED_MULTICOLOR_LINK_ACT) |
+               BCM5482_SHD_LEDS1_LED3(BCM_LED_MULTICOLOR_LINK_ACT);
+       bcm_phy_write_exp(phydev, BCM_EXP_MULTICOLOR, val);
+
        return 0;
 }
 
index bbd8c22067f3d2c4975757febf0658ddb1c3e8f7..97d45bd5b38e382b678dc3ce814f813cf045d7d6 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/netdevice.h>
 
 #define DP83822_PHY_ID         0x2000a240
+#define DP83825I_PHY_ID                0x2000a150
+
 #define DP83822_DEVADDR                0x1f
 
 #define MII_DP83822_PHYSCR     0x11
@@ -304,26 +306,30 @@ static int dp83822_resume(struct phy_device *phydev)
        return 0;
 }
 
+#define DP83822_PHY_DRIVER(_id, _name)                         \
+       {                                                       \
+               PHY_ID_MATCH_MODEL(_id),                        \
+               .name           = (_name),                      \
+               .features       = PHY_BASIC_FEATURES,           \
+               .soft_reset     = dp83822_phy_reset,            \
+               .config_init    = dp83822_config_init,          \
+               .get_wol = dp83822_get_wol,                     \
+               .set_wol = dp83822_set_wol,                     \
+               .ack_interrupt = dp83822_ack_interrupt,         \
+               .config_intr = dp83822_config_intr,             \
+               .suspend = dp83822_suspend,                     \
+               .resume = dp83822_resume,                       \
+       }
+
 static struct phy_driver dp83822_driver[] = {
-       {
-               .phy_id = DP83822_PHY_ID,
-               .phy_id_mask = 0xfffffff0,
-               .name = "TI DP83822",
-               .features = PHY_BASIC_FEATURES,
-               .config_init = dp83822_config_init,
-               .soft_reset = dp83822_phy_reset,
-               .get_wol = dp83822_get_wol,
-               .set_wol = dp83822_set_wol,
-               .ack_interrupt = dp83822_ack_interrupt,
-               .config_intr = dp83822_config_intr,
-               .suspend = dp83822_suspend,
-               .resume = dp83822_resume,
-        },
+       DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"),
+       DP83822_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"),
 };
 module_phy_driver(dp83822_driver);
 
 static struct mdio_device_id __maybe_unused dp83822_tbl[] = {
        { DP83822_PHY_ID, 0xfffffff0 },
+       { DP83825I_PHY_ID, 0xfffffff0 },
        { },
 };
 MODULE_DEVICE_TABLE(mdio, dp83822_tbl);
index 3ccba37bd6ddeafd5c7f65e9e58f1ef5d57b2540..f76c4048b9780e0f058abc1cb8dbc1cf7bbe93eb 100644 (file)
@@ -1489,9 +1489,10 @@ static int marvell_get_sset_count(struct phy_device *phydev)
 
 static void marvell_get_strings(struct phy_device *phydev, u8 *data)
 {
+       int count = marvell_get_sset_count(phydev);
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) {
+       for (i = 0; i < count; i++) {
                strlcpy(data + i * ETH_GSTRING_LEN,
                        marvell_hw_stats[i].string, ETH_GSTRING_LEN);
        }
@@ -1519,9 +1520,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
 static void marvell_get_stats(struct phy_device *phydev,
                              struct ethtool_stats *stats, u64 *data)
 {
+       int count = marvell_get_sset_count(phydev);
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++)
+       for (i = 0; i < count; i++)
                data[i] = marvell_get_stat(phydev, i);
 }
 
index a238388eb1a5e09f138f5a63d627cc25f076da29..0eec2913c289b83a77a238aca2da64e558378336 100644 (file)
@@ -201,6 +201,7 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev)
 static int meson_gxl_config_intr(struct phy_device *phydev)
 {
        u16 val;
+       int ret;
 
        if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
                val = INTSRC_ANEG_PR
@@ -213,6 +214,11 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
                val = 0;
        }
 
+       /* Ack any pending IRQ */
+       ret = meson_gxl_ack_interrupt(phydev);
+       if (ret)
+               return ret;
+
        return phy_write(phydev, INTSRC_MASK, val);
 }
 
index 49fdd1ee798e4418f5145b00ad6573525eed0f82..77068c545de0d33607981e7a94a32bf7ed1ff34c 100644 (file)
@@ -1831,7 +1831,7 @@ int genphy_soft_reset(struct phy_device *phydev)
 {
        int ret;
 
-       ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
+       ret = phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
        if (ret < 0)
                return ret;
 
index 92b64e254b44ed764d7db9039c949cbdb6d66597..7475cef17cf76ca09e59b6987b82010d6ce1076f 100644 (file)
@@ -159,6 +159,14 @@ static const struct spi_device_id ks8995_id[] = {
 };
 MODULE_DEVICE_TABLE(spi, ks8995_id);
 
+static const struct of_device_id ks8895_spi_of_match[] = {
+        { .compatible = "micrel,ks8995" },
+        { .compatible = "micrel,ksz8864" },
+        { .compatible = "micrel,ksz8795" },
+        { },
+ };
+MODULE_DEVICE_TABLE(of, ks8895_spi_of_match);
+
 static inline u8 get_chip_id(u8 val)
 {
        return (val >> ID1_CHIPID_S) & ID1_CHIPID_M;
@@ -526,6 +534,7 @@ static int ks8995_remove(struct spi_device *spi)
 static struct spi_driver ks8995_driver = {
        .driver = {
                .name       = "spi-ks8995",
+               .of_match_table = of_match_ptr(ks8895_spi_of_match),
        },
        .probe    = ks8995_probe,
        .remove   = ks8995_remove,
index f4e93f5fc2043ebb29c5b36e94afe49ec0c7d7ba..ea90db3c77058b6a799245bd5a3ff9f672b5da5e 100644 (file)
@@ -153,7 +153,7 @@ out_fail:
 void
 slhc_free(struct slcompress *comp)
 {
-       if ( comp == NULLSLCOMPR )
+       if ( IS_ERR_OR_NULL(comp) )
                return;
 
        if ( comp->tstate != NULLSLSTATE )
index 6ed96fdfd96dd5a858e8416fcfcfa8c78c628e2a..16963f7a88f748fd0946fafef5c5a477e138cf63 100644 (file)
@@ -1156,6 +1156,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
                return -EINVAL;
        }
 
+       if (netdev_has_upper_dev(dev, port_dev)) {
+               NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface");
+               netdev_err(dev, "Device %s is already an upper device of the team interface\n",
+                          portname);
+               return -EBUSY;
+       }
+
        if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
            vlan_uses_dev(dev)) {
                NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
@@ -1246,6 +1253,23 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
                goto err_option_port_add;
        }
 
+       /* set promiscuity level to new slave */
+       if (dev->flags & IFF_PROMISC) {
+               err = dev_set_promiscuity(port_dev, 1);
+               if (err)
+                       goto err_set_slave_promisc;
+       }
+
+       /* set allmulti level to new slave */
+       if (dev->flags & IFF_ALLMULTI) {
+               err = dev_set_allmulti(port_dev, 1);
+               if (err) {
+                       if (dev->flags & IFF_PROMISC)
+                               dev_set_promiscuity(port_dev, -1);
+                       goto err_set_slave_promisc;
+               }
+       }
+
        netif_addr_lock_bh(dev);
        dev_uc_sync_multiple(port_dev, dev);
        dev_mc_sync_multiple(port_dev, dev);
@@ -1262,6 +1286,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
 
        return 0;
 
+err_set_slave_promisc:
+       __team_option_inst_del_port(team, port);
+
 err_option_port_add:
        team_upper_dev_unlink(team, port);
 
@@ -1307,6 +1334,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
 
        team_port_disable(team, port);
        list_del_rcu(&port->list);
+
+       if (dev->flags & IFF_PROMISC)
+               dev_set_promiscuity(port_dev, -1);
+       if (dev->flags & IFF_ALLMULTI)
+               dev_set_allmulti(port_dev, -1);
+
        team_upper_dev_unlink(team, port);
        netdev_rx_handler_unregister(port_dev);
        team_port_disable_netpoll(port);
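
These team hunks copy the bonding behaviour: a newly enslaved port inherits the master's promiscuity and allmulti reference counts, a failed dev_set_allmulti() rolls back the promiscuity bump, and team_port_del() releases both again. A condensed sketch of the enslave side (error labels simplified; dev is the team master, port_dev the port):

#include <linux/netdevice.h>

static int example_inherit_flags(struct net_device *dev,
                                 struct net_device *port_dev)
{
        int err;

        if (dev->flags & IFF_PROMISC) {
                err = dev_set_promiscuity(port_dev, 1);
                if (err)
                        return err;
        }

        if (dev->flags & IFF_ALLMULTI) {
                err = dev_set_allmulti(port_dev, 1);
                if (err) {
                        /* Roll back the promisc reference taken above. */
                        if (dev->flags & IFF_PROMISC)
                                dev_set_promiscuity(port_dev, -1);
                        return err;
                }
        }

        return 0;
}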
index 1d68921723dc08532b3f5321a52865076ad66336..e9ca1c088d0b11611e4d80268ced3806db05cffb 100644 (file)
@@ -1763,9 +1763,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        int skb_xdp = 1;
        bool frags = tun_napi_frags_enabled(tfile);
 
-       if (!(tun->dev->flags & IFF_UP))
-               return -EIO;
-
        if (!(tun->flags & IFF_NO_PI)) {
                if (len < sizeof(pi))
                        return -EINVAL;
@@ -1867,6 +1864,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                        err = skb_copy_datagram_from_iter(skb, 0, from, len);
 
                if (err) {
+                       err = -EFAULT;
+drop:
                        this_cpu_inc(tun->pcpu_stats->rx_dropped);
                        kfree_skb(skb);
                        if (frags) {
@@ -1874,7 +1873,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                                mutex_unlock(&tfile->napi_mutex);
                        }
 
-                       return -EFAULT;
+                       return err;
                }
        }
 
@@ -1958,6 +1957,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
            !tfile->detached)
                rxhash = __skb_get_hash_symmetric(skb);
 
+       rcu_read_lock();
+       if (unlikely(!(tun->dev->flags & IFF_UP))) {
+               err = -EIO;
+               rcu_read_unlock();
+               goto drop;
+       }
+
        if (frags) {
                /* Exercise flow dissector code path. */
                u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
@@ -1965,6 +1971,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                if (unlikely(headlen > skb_headlen(skb))) {
                        this_cpu_inc(tun->pcpu_stats->rx_dropped);
                        napi_free_frags(&tfile->napi);
+                       rcu_read_unlock();
                        mutex_unlock(&tfile->napi_mutex);
                        WARN_ON(1);
                        return -ENOMEM;
@@ -1992,6 +1999,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        } else {
                netif_rx_ni(skb);
        }
+       rcu_read_unlock();
 
        stats = get_cpu_ptr(tun->pcpu_stats);
        u64_stats_update_begin(&stats->syncp);
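
Testing IFF_UP once on entry left a window in which the device could be brought down before netif_rx_ni(); the hunks re-check it under rcu_read_lock() right before the skb is handed up and route the failure through the common drop path. A minimal sketch of the late check (rx_dropped accounting and the NAPI frags path are omitted):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_inject(struct net_device *dev, struct sk_buff *skb)
{
        rcu_read_lock();
        if (unlikely(!(dev->flags & IFF_UP))) {
                rcu_read_unlock();
                kfree_skb(skb);         /* device went down under us */
                return -EIO;
        }
        netif_rx_ni(skb);
        rcu_read_unlock();
        return 0;
}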
index 820a2fe7d027733eb1c9ccc54aa504a4088a6600..aff995be2a318796a832e19c5c3c3e3cfc5c9efd 100644 (file)
@@ -1301,6 +1301,20 @@ static const struct driver_info trendnet_info = {
        .tx_fixup       = aqc111_tx_fixup,
 };
 
+static const struct driver_info qnap_info = {
+       .description    = "QNAP QNA-UC5G1T USB to 5GbE Adapter",
+       .bind           = aqc111_bind,
+       .unbind         = aqc111_unbind,
+       .status         = aqc111_status,
+       .link_reset     = aqc111_link_reset,
+       .reset          = aqc111_reset,
+       .stop           = aqc111_stop,
+       .flags          = FLAG_ETHER | FLAG_FRAMING_AX |
+                         FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
+       .rx_fixup       = aqc111_rx_fixup,
+       .tx_fixup       = aqc111_tx_fixup,
+};
+
 static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
 {
        struct usbnet *dev = usb_get_intfdata(intf);
@@ -1455,6 +1469,7 @@ static const struct usb_device_id products[] = {
        {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)},
        {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)},
        {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)},
+       {AQC111_USB_ETH_DEV(0x1c04, 0x0015, qnap_info)},
        { },/* END */
 };
 MODULE_DEVICE_TABLE(usb, products);
index 5512a1038721459a727326bb8823e16c0886b7f1..3e9b2c319e45256865415da43386031c556d9e2b 100644 (file)
@@ -851,6 +851,14 @@ static const struct usb_device_id  products[] = {
        .driver_info = 0,
 },
 
+/* QNAP QNA-UC5G1T USB to 5GbE Adapter (based on AQC111U) */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(0x1c04, 0x0015, USB_CLASS_COMM,
+                                     USB_CDC_SUBCLASS_ETHERNET,
+                                     USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+
 /* WHITELIST!!!
  *
  * CDC Ether uses two interfaces, not necessarily consecutive.
index 74bebbdb4b158791135410d00591e84eb9da7073..679e404a5224fc33b8591b16a59e0910adb4c4cd 100644 (file)
@@ -1122,9 +1122,16 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x0846, 0x68d3, 8)},    /* Netgear Aircard 779S */
        {QMI_FIXED_INTF(0x12d1, 0x140c, 1)},    /* Huawei E173 */
        {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},    /* Huawei E1820 */
+       {QMI_FIXED_INTF(0x1435, 0x0918, 3)},    /* Wistron NeWeb D16Q1 */
+       {QMI_FIXED_INTF(0x1435, 0x0918, 4)},    /* Wistron NeWeb D16Q1 */
+       {QMI_FIXED_INTF(0x1435, 0x0918, 5)},    /* Wistron NeWeb D16Q1 */
+       {QMI_FIXED_INTF(0x1435, 0x3185, 4)},    /* Wistron NeWeb M18Q5 */
+       {QMI_FIXED_INTF(0x1435, 0xd111, 4)},    /* M9615A DM11-1 D51QC */
        {QMI_FIXED_INTF(0x1435, 0xd181, 3)},    /* Wistron NeWeb D18Q1 */
        {QMI_FIXED_INTF(0x1435, 0xd181, 4)},    /* Wistron NeWeb D18Q1 */
        {QMI_FIXED_INTF(0x1435, 0xd181, 5)},    /* Wistron NeWeb D18Q1 */
+       {QMI_FIXED_INTF(0x1435, 0xd182, 4)},    /* Wistron NeWeb D18 */
+       {QMI_FIXED_INTF(0x1435, 0xd182, 5)},    /* Wistron NeWeb D18 */
        {QMI_FIXED_INTF(0x1435, 0xd191, 4)},    /* Wistron NeWeb D19Q1 */
        {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
        {QMI_FIXED_INTF(0x16d8, 0x6003, 0)},    /* CMOTech 6003 */
@@ -1180,6 +1187,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x19d2, 0x0265, 4)},    /* ONDA MT8205 4G LTE */
        {QMI_FIXED_INTF(0x19d2, 0x0284, 4)},    /* ZTE MF880 */
        {QMI_FIXED_INTF(0x19d2, 0x0326, 4)},    /* ZTE MF821D */
+       {QMI_FIXED_INTF(0x19d2, 0x0396, 3)},    /* ZTE ZM8620 */
        {QMI_FIXED_INTF(0x19d2, 0x0412, 4)},    /* Telewell TW-LTE 4G */
        {QMI_FIXED_INTF(0x19d2, 0x1008, 4)},    /* ZTE (Vodafone) K3570-Z */
        {QMI_FIXED_INTF(0x19d2, 0x1010, 4)},    /* ZTE (Vodafone) K3571-Z */
@@ -1200,9 +1208,12 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
        {QMI_FIXED_INTF(0x19d2, 0x1426, 2)},    /* ZTE MF91 */
        {QMI_FIXED_INTF(0x19d2, 0x1428, 2)},    /* Telewell TW-LTE 4G v2 */
+       {QMI_FIXED_INTF(0x19d2, 0x1432, 3)},    /* ZTE ME3620 */
        {QMI_FIXED_INTF(0x19d2, 0x2002, 4)},    /* ZTE (Vodafone) K3765-Z */
+       {QMI_FIXED_INTF(0x2001, 0x7e16, 3)},    /* D-Link DWM-221 */
        {QMI_FIXED_INTF(0x2001, 0x7e19, 4)},    /* D-Link DWM-221 B1 */
        {QMI_FIXED_INTF(0x2001, 0x7e35, 4)},    /* D-Link DWM-222 */
+       {QMI_FIXED_INTF(0x2020, 0x2031, 4)},    /* Olicard 600 */
        {QMI_FIXED_INTF(0x2020, 0x2033, 4)},    /* BroadMobi BM806U */
        {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
        {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
index 7c1430ed02445b6e6f13c663b555ef550276c899..9ee4d7402ca23296091939a59a5a6fec459a8472 100644 (file)
@@ -875,6 +875,7 @@ static const struct net_device_ops vrf_netdev_ops = {
        .ndo_init               = vrf_dev_init,
        .ndo_uninit             = vrf_dev_uninit,
        .ndo_start_xmit         = vrf_xmit,
+       .ndo_set_mac_address    = eth_mac_addr,
        .ndo_get_stats64        = vrf_get_stats64,
        .ndo_add_slave          = vrf_add_slave,
        .ndo_del_slave          = vrf_del_slave,
@@ -1273,9 +1274,15 @@ static void vrf_setup(struct net_device *dev)
 
        /* default to no qdisc; user can add if desired */
        dev->priv_flags |= IFF_NO_QUEUE;
+       dev->priv_flags |= IFF_NO_RX_HANDLER;
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 
-       dev->min_mtu = 0;
-       dev->max_mtu = 0;
+       /* VRF devices do not care about MTU, but if the MTU is set
+        * too low then the ipv4 and ipv6 protocols are disabled
+        * which breaks networking.
+        */
+       dev->min_mtu = IPV6_MIN_MTU;
+       dev->max_mtu = ETH_MAX_MTU;
 }
 
 static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
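
With min_mtu and max_mtu both 0 the core accepted any MTU, and a value below IPV6_MIN_MTU (1280) makes the kernel shut IPv6 down on the device; the new bounds keep a VRF's MTU in a range both address families tolerate. A sketch of how a virtual device advertises that range, which dev_set_mtu() then enforces on its behalf:

#include <linux/netdevice.h>
#include <linux/ipv6.h>         /* IPV6_MIN_MTU */
#include <linux/if_ether.h>     /* ETH_MAX_MTU */

static void example_setup(struct net_device *dev)
{
        /* min_mtu == max_mtu == 0 meant "don't check"; an MTU below
         * IPV6_MIN_MTU silently disables IPv6 on the device, so clamp
         * what dev_set_mtu() will accept instead. */
        dev->min_mtu = IPV6_MIN_MTU;
        dev->max_mtu = ETH_MAX_MTU;
}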
index 077f1b9f27616d34e966fc3760b40fd1f6a5915a..d76dfed8d9bbef1d1ae8470686e417af2e531ac8 100644 (file)
@@ -4335,10 +4335,8 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
                /* If vxlan->dev is in the same netns, it has already been added
                 * to the list by the previous loop.
                 */
-               if (!net_eq(dev_net(vxlan->dev), net)) {
-                       gro_cells_destroy(&vxlan->gro_cells);
+               if (!net_eq(dev_net(vxlan->dev), net))
                        unregister_netdevice_queue(vxlan->dev, head);
-               }
        }
 
        for (h = 0; h < PORT_HASH_SIZE; ++h)
index 24b983edb35756fe7586a3f196ccd12cd2484de6..eca87f7c5b6c1e53f7f1c1921bf653af20e1f213 100644 (file)
@@ -1855,7 +1855,7 @@ void ath10k_ce_dump_registers(struct ath10k *ar,
        struct ath10k_ce_crash_data ce_data;
        u32 addr, id;
 
-       lockdep_assert_held(&ar->data_lock);
+       lockdep_assert_held(&ar->dump_mutex);
 
        ath10k_err(ar, "Copy Engine register dump:\n");
 
index 835b8de92d55e6f94cffbc2f1449eb27acff72b3..aff585658fc0f6d1d542e7111c6ce1ef566167f8 100644 (file)
@@ -3119,6 +3119,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
                goto err_free_wq;
 
        mutex_init(&ar->conf_mutex);
+       mutex_init(&ar->dump_mutex);
        spin_lock_init(&ar->data_lock);
 
        INIT_LIST_HEAD(&ar->peers);
index e08a17b01e035dba87cc63a05d29cd59f0d732f4..e35aae5146f10615fcba3451713dc1b9a64a8f93 100644 (file)
@@ -1063,6 +1063,9 @@ struct ath10k {
        /* prevents concurrent FW reconfiguration */
        struct mutex conf_mutex;
 
+       /* protects coredump data */
+       struct mutex dump_mutex;
+
        /* protects shared structure data */
        spinlock_t data_lock;
 
index 33838d9c1cb6068b56099f403b6fd668b04fb620..45a355fb62b939b966bfcdf580b104646a6ce99d 100644 (file)
@@ -1102,7 +1102,7 @@ struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
 {
        struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
 
-       lockdep_assert_held(&ar->data_lock);
+       lockdep_assert_held(&ar->dump_mutex);
 
        if (ath10k_coredump_mask == 0)
                /* coredump disabled */
@@ -1146,7 +1146,7 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
        if (!buf)
                return NULL;
 
-       spin_lock_bh(&ar->data_lock);
+       mutex_lock(&ar->dump_mutex);
 
        dump_data = (struct ath10k_dump_file_data *)(buf);
        strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
@@ -1213,7 +1213,7 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
                sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
        }
 
-       spin_unlock_bh(&ar->data_lock);
+       mutex_unlock(&ar->dump_mutex);
 
        return dump_data;
 }
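
Dumping firmware memory involves slow, sleeping diag reads, which is illegal under the data_lock spinlock; the ath10k hunks introduce a dedicated dump_mutex, defer the dump to a work item, and keep only the fw_crash_counter update under the spinlock. A sketch of the defer-from-atomic pattern (names are illustrative; the driver uses ar_pci->dump_work and its own workqueue):

#include <linux/mutex.h>
#include <linux/workqueue.h>

struct example_dev {
        struct mutex dump_mutex;        /* serializes coredump access */
        struct work_struct dump_work;
};

static void example_dump_work(struct work_struct *work)
{
        struct example_dev *ed = container_of(work, struct example_dev,
                                              dump_work);

        mutex_lock(&ed->dump_mutex);    /* may sleep: fine in a worker */
        /* ... slow register/memory dump would run here ... */
        mutex_unlock(&ed->dump_mutex);
}

static void example_init(struct example_dev *ed)
{
        mutex_init(&ed->dump_mutex);
        INIT_WORK(&ed->dump_work, example_dump_work);
}

static void example_fw_crashed(struct example_dev *ed)
{
        /* Runs in IRQ-ish context: schedule only, never dump inline. */
        schedule_work(&ed->dump_work);
}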
index a20ea270d519be335b9b0086b1d5f9c8ea3d385d..1acc622d218333ac131666536b1077fb1b9ee808 100644 (file)
@@ -2728,7 +2728,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
                        num_msdus++;
                        num_bytes += ret;
                }
-               ieee80211_return_txq(hw, txq);
+               ieee80211_return_txq(hw, txq, false);
                ieee80211_txq_schedule_end(hw, txq->ac);
 
                record->num_msdus = cpu_to_le16(num_msdus);
index b73c23d4ce86d0cd0631a4838b4ce3a150e34f49..9c703d287333e715349d818028f8db57cbd3dca0 100644 (file)
@@ -4089,7 +4089,7 @@ static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac)
                        if (ret < 0)
                                break;
                }
-               ieee80211_return_txq(hw, txq);
+               ieee80211_return_txq(hw, txq, false);
                ath10k_htt_tx_txq_update(hw, txq);
                if (ret == -EBUSY)
                        break;
@@ -4374,7 +4374,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
                if (ret < 0)
                        break;
        }
-       ieee80211_return_txq(hw, txq);
+       ieee80211_return_txq(hw, txq, false);
        ath10k_htt_tx_txq_update(hw, txq);
 out:
        ieee80211_txq_schedule_end(hw, ac);
@@ -5774,7 +5774,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
        }
 
        if (changed & BSS_CHANGED_MCAST_RATE &&
-           !WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) {
+           !ath10k_mac_vif_chan(arvif->vif, &def)) {
                band = def.chan->band;
                rateidx = vif->bss_conf.mcast_rate[band] - 1;
 
@@ -5812,7 +5812,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
        }
 
        if (changed & BSS_CHANGED_BASIC_RATES) {
-               if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) {
+               if (ath10k_mac_vif_chan(vif, &def)) {
                        mutex_unlock(&ar->conf_mutex);
                        return;
                }
index 271f92c24d4461045a8d84bd46aadcb22a7757e9..2c27f407a851f869368def6346066b1435362b2c 100644 (file)
@@ -1441,7 +1441,7 @@ static void ath10k_pci_dump_registers(struct ath10k *ar,
        __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
        int i, ret;
 
-       lockdep_assert_held(&ar->data_lock);
+       lockdep_assert_held(&ar->dump_mutex);
 
        ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
                                      hi_failure_state,
@@ -1656,7 +1656,7 @@ static void ath10k_pci_dump_memory(struct ath10k *ar,
        int ret, i;
        u8 *buf;
 
-       lockdep_assert_held(&ar->data_lock);
+       lockdep_assert_held(&ar->dump_mutex);
 
        if (!crash_data)
                return;
@@ -1734,14 +1734,19 @@ static void ath10k_pci_dump_memory(struct ath10k *ar,
        }
 }
 
-static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
+static void ath10k_pci_fw_dump_work(struct work_struct *work)
 {
+       struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci,
+                                                dump_work);
        struct ath10k_fw_crash_data *crash_data;
+       struct ath10k *ar = ar_pci->ar;
        char guid[UUID_STRING_LEN + 1];
 
-       spin_lock_bh(&ar->data_lock);
+       mutex_lock(&ar->dump_mutex);
 
+       spin_lock_bh(&ar->data_lock);
        ar->stats.fw_crash_counter++;
+       spin_unlock_bh(&ar->data_lock);
 
        crash_data = ath10k_coredump_new(ar);
 
@@ -1756,11 +1761,18 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
        ath10k_ce_dump_registers(ar, crash_data);
        ath10k_pci_dump_memory(ar, crash_data);
 
-       spin_unlock_bh(&ar->data_lock);
+       mutex_unlock(&ar->dump_mutex);
 
        queue_work(ar->workqueue, &ar->restart_work);
 }
 
+static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       queue_work(ar->workqueue, &ar_pci->dump_work);
+}
+
 void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                        int force)
 {
@@ -3442,6 +3454,8 @@ int ath10k_pci_setup_resource(struct ath10k *ar)
        spin_lock_init(&ar_pci->ps_lock);
        mutex_init(&ar_pci->ce_diag_mutex);
 
+       INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work);
+
        timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
 
        if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
index 3773c79f322f5e3d911b92c07cae29941e32bceb..4455ed6c5275ae1a0aa3e5f5859e835bc86eecd8 100644 (file)
@@ -121,6 +121,8 @@ struct ath10k_pci {
        /* For protecting ce_diag */
        struct mutex ce_diag_mutex;
 
+       struct work_struct dump_work;
+
        struct ath10k_ce ce;
        struct timer_list rx_post_retry;
 
index 773d428ff1b03328ca43c1c8db74103d8d846444..b17e1ca40995eab7b0f80c479f0cff7381801e76 100644 (file)
@@ -1938,12 +1938,15 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
                goto out;
 
        while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) {
+               bool force;
+
                tid = (struct ath_atx_tid *)queue->drv_priv;
 
                ret = ath_tx_sched_aggr(sc, txq, tid);
                ath_dbg(common, QUEUE, "ath_tx_sched_aggr returned %d\n", ret);
 
-               ieee80211_return_txq(hw, queue);
+               force = !skb_queue_empty(&tid->retry_q);
+               ieee80211_return_txq(hw, queue, force);
        }
 
 out:
index fdc56f821b5ac0961f8b503d5f918999a01e214e..0a87d87fbb4f5dcf7f9cd5f99fcfb33600ca2297 100644 (file)
@@ -82,6 +82,7 @@
 #define IWL_22000_HR_A0_FW_PRE         "iwlwifi-QuQnj-a0-hr-a0-"
 #define IWL_22000_SU_Z0_FW_PRE         "iwlwifi-su-z0-"
 #define IWL_QU_B_JF_B_FW_PRE           "iwlwifi-Qu-b0-jf-b0-"
+#define IWL_QUZ_A_HR_B_FW_PRE          "iwlwifi-QuZ-a0-hr-b0-"
 #define IWL_QNJ_B_JF_B_FW_PRE          "iwlwifi-QuQnj-b0-jf-b0-"
 #define IWL_CC_A_FW_PRE                        "iwlwifi-cc-a0-"
 #define IWL_22000_SO_A_JF_B_FW_PRE     "iwlwifi-so-a0-jf-b0-"
        IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
 #define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
        IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
-#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
-       IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
+       IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
 #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
        IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
 #define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api)            \
@@ -200,7 +201,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
 #define IWL_DEVICE_AX210                                               \
        IWL_DEVICE_AX200_COMMON,                                        \
        .device_family = IWL_DEVICE_FAMILY_AX210,                       \
-       .base_params = &iwl_22000_base_params,                          \
+       .base_params = &iwl_22560_base_params,                          \
        .csr = &iwl_csr_v1,                                             \
        .min_txq_size = 128
 
@@ -235,8 +236,20 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
        .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
 };
 
-const struct iwl_cfg iwl22260_2ax_cfg = {
-       .name = "Intel(R) Wireless-AX 22260",
+const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
+       .name = "Intel(R) Wi-Fi 6 AX101",
+       .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+       IWL_DEVICE_22500,
+       /*
+        * This device doesn't support receiving BlockAck with a large bitmap
+        * so we need to restrict the size of transmitted aggregation to the
+        * HT size; mac80211 would otherwise pick the HE max (256) by default.
+        */
+       .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl_ax200_cfg_cc = {
+       .name = "Intel(R) Wi-Fi 6 AX200 160MHz",
        .fw_name_pre = IWL_CC_A_FW_PRE,
        IWL_DEVICE_22500,
        /*
@@ -249,7 +262,7 @@ const struct iwl_cfg iwl22260_2ax_cfg = {
 };
 
 const struct iwl_cfg killer1650x_2ax_cfg = {
-       .name = "Killer(R) Wireless-AX 1650x Wireless Network Adapter (200NGW)",
+       .name = "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)",
        .fw_name_pre = IWL_CC_A_FW_PRE,
        IWL_DEVICE_22500,
        /*
@@ -262,7 +275,7 @@ const struct iwl_cfg killer1650x_2ax_cfg = {
 };
 
 const struct iwl_cfg killer1650w_2ax_cfg = {
-       .name = "Killer(R) Wireless-AX 1650w Wireless Network Adapter (200D2W)",
+       .name = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)",
        .fw_name_pre = IWL_CC_A_FW_PRE,
        IWL_DEVICE_22500,
        /*
@@ -328,7 +341,7 @@ const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
 };
 
 const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
-       .name = "Killer(R) Wireless-AX 1650i Wireless Network Adapter (22560NGW)",
+       .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
        .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
        IWL_DEVICE_22500,
        /*
@@ -340,7 +353,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
 };
 
 const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
-       .name = "Killer(R) Wireless-AX 1650s Wireless Network Adapter (22560D2W)",
+       .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
        .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
        IWL_DEVICE_22500,
        /*
@@ -444,6 +457,7 @@ MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
index 575a7022d045be7ad37243b1ae4c454efebd24dc..3846064d51a5a7c14ba866070a14b5802474a73b 100644 (file)
@@ -1,7 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -136,6 +136,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
        .ht_params = &iwl5000_ht_params,
        .led_mode = IWL_LED_BLINK,
        .internal_wimax_coex = true,
+       .csr = &iwl_csr_v1,
 };
 
 #define IWL_DEVICE_5150                                                \
index f119c49cd39cd516c09459f4f145f898bd4fe38b..d7380016f1c0d4f4d85fd9b063f7344f5bd954c0 100644 (file)
@@ -1614,6 +1614,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
        if (!range) {
                IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n",
                        le32_to_cpu(reg->region_id), type);
+               memset(*data, 0, le32_to_cpu((*data)->len));
                return;
        }
 
@@ -1623,6 +1624,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
                if (range_size < 0) {
                        IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n",
                                le32_to_cpu(reg->region_id), type);
+                       memset(*data, 0, le32_to_cpu((*data)->len));
                        return;
                }
                range = range + range_size;
@@ -1807,12 +1809,12 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
 
        trigger = fwrt->dump.active_trigs[id].trig;
 
-       size = sizeof(*dump_file);
-       size += iwl_fw_ini_get_trigger_len(fwrt, trigger);
-
+       size = iwl_fw_ini_get_trigger_len(fwrt, trigger);
        if (!size)
                return NULL;
 
+       size += sizeof(*dump_file);
+
        dump_file = vzalloc(size);
        if (!dump_file)
                return NULL;
@@ -1942,14 +1944,10 @@ int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
        iwl_dump_error_desc->len = 0;
 
        ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0);
-       if (ret) {
+       if (ret)
                kfree(iwl_dump_error_desc);
-       } else {
-               set_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status);
-
-               /* trigger nmi to halt the fw */
-               iwl_force_nmi(fwrt->trans);
-       }
+       else
+               iwl_trans_sync_nmi(fwrt->trans);
 
        return ret;
 }
@@ -2489,22 +2487,6 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point);
 
 void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt)
 {
-       /* if the wait event timeout elapses instead of wake up then
-        * the driver did not receive NMI interrupt and can not assume the FW
-        * is halted
-        */
-       int ret = wait_event_timeout(fwrt->trans->fw_halt_waitq,
-                                    !test_bit(STATUS_FW_WAIT_DUMP,
-                                              &fwrt->trans->status),
-                                    msecs_to_jiffies(2000));
-       if (!ret) {
-               /* failed to receive NMI interrupt, assuming the FW is stuck */
-               set_bit(STATUS_FW_ERROR, &fwrt->trans->status);
-
-               clear_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status);
-       }
-
-       /* Assuming the op mode mutex is held at this point */
        iwl_fw_dbg_collect_sync(fwrt);
 
        iwl_trans_stop_device(fwrt->trans);
index 641c95d03b1574e5fda0ac72d0c85205039c52f3..e06407dc088b14ab6b4a6593a678963654b82152 100644 (file)
@@ -93,7 +93,7 @@ struct iwl_ucode_header {
        } u;
 };
 
-#define IWL_UCODE_INI_TLV_GROUP        BIT(24)
+#define IWL_UCODE_INI_TLV_GROUP        0x1000000
 
 /*
  * new TLV uCode file layout
@@ -148,11 +148,14 @@ enum iwl_ucode_tlv_type {
        IWL_UCODE_TLV_UMAC_DEBUG_ADDRS  = 54,
        IWL_UCODE_TLV_LMAC_DEBUG_ADDRS  = 55,
        IWL_UCODE_TLV_FW_RECOVERY_INFO  = 57,
-       IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION    = IWL_UCODE_INI_TLV_GROUP | 0x1,
-       IWL_UCODE_TLV_TYPE_HCMD                 = IWL_UCODE_INI_TLV_GROUP | 0x2,
-       IWL_UCODE_TLV_TYPE_REGIONS              = IWL_UCODE_INI_TLV_GROUP | 0x3,
-       IWL_UCODE_TLV_TYPE_TRIGGERS             = IWL_UCODE_INI_TLV_GROUP | 0x4,
-       IWL_UCODE_TLV_TYPE_DEBUG_FLOW           = IWL_UCODE_INI_TLV_GROUP | 0x5,
+
+       IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION    = IWL_UCODE_INI_TLV_GROUP + 0x1,
+       IWL_UCODE_TLV_DEBUG_BASE = IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION,
+       IWL_UCODE_TLV_TYPE_HCMD                 = IWL_UCODE_INI_TLV_GROUP + 0x2,
+       IWL_UCODE_TLV_TYPE_REGIONS              = IWL_UCODE_INI_TLV_GROUP + 0x3,
+       IWL_UCODE_TLV_TYPE_TRIGGERS             = IWL_UCODE_INI_TLV_GROUP + 0x4,
+       IWL_UCODE_TLV_TYPE_DEBUG_FLOW           = IWL_UCODE_INI_TLV_GROUP + 0x5,
+       IWL_UCODE_TLV_DEBUG_MAX = IWL_UCODE_TLV_TYPE_DEBUG_FLOW,
 
        /* TLVs 0x1000-0x2000 are for internal driver usage */
        IWL_UCODE_TLV_FW_DBG_DUMP_LST   = 0x1000,
index 7adf4e4e841a92f3ae98534b011175e3cfb00ce7..12310e3d2fc5aa7b544b08c95ad3086c3de799c1 100644 (file)
@@ -76,7 +76,6 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
        fwrt->ops_ctx = ops_ctx;
        INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
        iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
-       init_waitqueue_head(&fwrt->trans->fw_halt_waitq);
 }
 IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
 
index f5f87773667b0bd2b68ccadcddcf3184777c04f9..93070848280a4e425f95de9842b5bbc42000fcfb 100644 (file)
@@ -549,8 +549,9 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
 extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
 extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
 extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
+extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
 extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
-extern const struct iwl_cfg iwl22260_2ax_cfg;
+extern const struct iwl_cfg iwl_ax200_cfg_cc;
 extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
 extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
 extern const struct iwl_cfg killer1650x_2ax_cfg;
index aea6d03e545a1db063f795c49a2b2c123b954452..e539bc94eff7fdcee8e4c979fe0581710768b275 100644 (file)
@@ -327,6 +327,7 @@ enum {
 #define CSR_HW_REV_TYPE_NONE           (0x00001F0)
 #define CSR_HW_REV_TYPE_QNJ            (0x0000360)
 #define CSR_HW_REV_TYPE_QNJ_B0         (0x0000364)
+#define CSR_HW_REV_TYPE_QUZ            (0x0000354)
 #define CSR_HW_REV_TYPE_HR_CDB         (0x0000340)
 #define CSR_HW_REV_TYPE_SO             (0x0000370)
 #define CSR_HW_REV_TYPE_TY             (0x0000420)
index 5798f434f68fd3c89361f17b2e66975f871ec907..c7070760a10aa2d9b1e8bc465abbaa31c459c879 100644 (file)
@@ -126,7 +126,8 @@ void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data,
                len -= ALIGN(tlv_len, 4);
                data += sizeof(*tlv) + ALIGN(tlv_len, 4);
 
-               if (!(tlv_type & IWL_UCODE_INI_TLV_GROUP))
+               if (tlv_type < IWL_UCODE_TLV_DEBUG_BASE ||
+                   tlv_type > IWL_UCODE_TLV_DEBUG_MAX)
                        continue;
 
                hdr = (void *)&tlv->data[0];
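
A bare `tlv_type & IWL_UCODE_INI_TLV_GROUP` test matches any type with bit 24 set, e.g. a hypothetical future 0x1001000 outside the five debug TLVs; redefining the group as a base value plus offsets and checking a closed range only admits the intended types. A compact sketch of the check (constants mirror the header above):

#include <stdbool.h>
#include <stdint.h>

#define INI_TLV_GROUP   0x1000000
#define DEBUG_BASE      (INI_TLV_GROUP + 0x1)
#define DEBUG_MAX       (INI_TLV_GROUP + 0x5)

/* (type & 0x1000000) would also match 0x1001000; the range does not. */
static bool is_ini_debug_tlv(uint32_t type)
{
        return type >= DEBUG_BASE && type <= DEBUG_MAX;
}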
index bbebbf3efd57db1a2101a5e6cc02cf2095666dd7..d8690acee40c0c45668f6480b10daff295633500 100644 (file)
@@ -338,7 +338,6 @@ enum iwl_d3_status {
  *     are sent
  * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
  * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
- * @STATUS_FW_WAIT_DUMP: if set, wait until cleared before collecting dump
  */
 enum iwl_trans_status {
        STATUS_SYNC_HCMD_ACTIVE,
@@ -351,7 +350,6 @@ enum iwl_trans_status {
        STATUS_TRANS_GOING_IDLE,
        STATUS_TRANS_IDLE,
        STATUS_TRANS_DEAD,
-       STATUS_FW_WAIT_DUMP,
 };
 
 static inline int
@@ -618,6 +616,7 @@ struct iwl_trans_ops {
        struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
                                                 u32 dump_mask);
        void (*debugfs_cleanup)(struct iwl_trans *trans);
+       void (*sync_nmi)(struct iwl_trans *trans);
 };
 
 /**
@@ -831,7 +830,6 @@ struct iwl_trans {
        u32 lmac_error_event_table[2];
        u32 umac_error_event_table;
        unsigned int error_event_table_tlv_status;
-       wait_queue_head_t fw_halt_waitq;
 
        /* pointer to trans specific struct */
        /*Ensure that this pointer will always be aligned to sizeof pointer */
@@ -1239,10 +1237,12 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
        /* prevent double restarts due to the same erroneous FW */
        if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
                iwl_op_mode_nic_error(trans->op_mode);
+}
 
-       if (test_and_clear_bit(STATUS_FW_WAIT_DUMP, &trans->status))
-               wake_up(&trans->fw_halt_waitq);
-
+static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
+{
+       if (trans->ops->sync_nmi)
+               trans->ops->sync_nmi(trans);
 }
 
 /*****************************************************
index 2453ceabf00dc867117286e143d3ac9b31ae247d..6925527d8457a294ff423547d86511e5a2718ae8 100644 (file)
@@ -774,8 +774,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                return;
 
        mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
-
-       if (!mvmvif->dbgfs_dir) {
+       if (IS_ERR_OR_NULL(mvmvif->dbgfs_dir)) {
                IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n",
                        dbgfs_dir);
                return;
index e9822a3ec373929ff520c9dea90c51194adcb69b..94132cfd1f56241b7e9548466c7050541c87e4c5 100644 (file)
@@ -460,9 +460,7 @@ static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
 static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
                                     struct cfg80211_pmsr_result *res)
 {
-       s64 rtt_avg = res->ftm.rtt_avg * 100;
-
-       do_div(rtt_avg, 6666);
+       s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);
 
        IWL_DEBUG_INFO(mvm, "entry %d\n", index);
        IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
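
do_div() divides an unsigned 64-bit lvalue in place, so a negative rtt_avg would first be reinterpreted as a huge positive number; div_s64() divides a signed 64-bit value with the sign intact. A sketch keeping the scaling from the hunk:

#include <linux/math64.h>
#include <linux/types.h>

static s64 example_scaled_rtt(s64 rtt_avg_raw)
{
        /* do_div() would treat a negative value as a huge u64 and
         * modify it in place; div_s64() handles the sign correctly. */
        return div_s64(rtt_avg_raw * 100, 6666);
}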
index 00a47f6f1d81503a8e8212566efe6ef6e728ccc8..ab68b5d53ec957d02156f3989c336d6b448a70be 100644 (file)
@@ -1121,7 +1121,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        ret = iwl_mvm_load_rt_fw(mvm);
        if (ret) {
                IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
-               iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
+               if (ret != -ERFKILL)
+                       iwl_fw_dbg_error_collect(&mvm->fwrt,
+                                                FW_DBG_TRIGGER_DRIVER);
                goto error;
        }
 
index 3a92c09d46926fa6d2d565df9e1479e7988ccde1..6a3b11dd2edf53cf4352178a56189c1ebe99de66 100644 (file)
@@ -2714,9 +2714,6 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
 
        iwl_mvm_mac_ctxt_remove(mvm, vif);
 
-       kfree(mvmvif->ap_wep_key);
-       mvmvif->ap_wep_key = NULL;
-
        mutex_unlock(&mvm->mutex);
 }
 
@@ -3183,24 +3180,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                ret = iwl_mvm_update_sta(mvm, vif, sta);
        } else if (old_state == IEEE80211_STA_ASSOC &&
                   new_state == IEEE80211_STA_AUTHORIZED) {
-               /* if wep is used, need to set the key for the station now */
-               if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
-                       mvm_sta->wep_key =
-                               kmemdup(mvmvif->ap_wep_key,
-                                       sizeof(*mvmvif->ap_wep_key) +
-                                       mvmvif->ap_wep_key->keylen,
-                                       GFP_KERNEL);
-                       if (!mvm_sta->wep_key) {
-                               ret = -ENOMEM;
-                               goto out_unlock;
-                       }
-
-                       ret = iwl_mvm_set_sta_key(mvm, vif, sta,
-                                                 mvm_sta->wep_key,
-                                                 STA_KEY_IDX_INVALID);
-               } else {
-                       ret = 0;
-               }
+               ret = 0;
 
                /* we don't support TDLS during DCM */
                if (iwl_mvm_phy_ctx_count(mvm) > 1)
@@ -3242,17 +3222,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                                                   NL80211_TDLS_DISABLE_LINK);
                }
 
-               /* Remove STA key if this is an AP using WEP */
-               if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
-                       int rm_ret = iwl_mvm_remove_sta_key(mvm, vif, sta,
-                                                           mvm_sta->wep_key);
-
-                       if (!ret)
-                               ret = rm_ret;
-                       kfree(mvm_sta->wep_key);
-                       mvm_sta->wep_key = NULL;
-               }
-
                if (unlikely(ret &&
                             test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
                                      &mvm->status)))
@@ -3289,6 +3258,13 @@ static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
                                  struct ieee80211_sta *sta, u32 changed)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (changed & (IEEE80211_RC_BW_CHANGED |
+                      IEEE80211_RC_SUPP_RATES_CHANGED |
+                      IEEE80211_RC_NSS_CHANGED))
+               iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+                                    true);
 
        if (vif->type == NL80211_IFTYPE_STATION &&
            changed & IEEE80211_RC_NSS_CHANGED)
@@ -3439,20 +3415,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
                break;
        case WLAN_CIPHER_SUITE_WEP40:
        case WLAN_CIPHER_SUITE_WEP104:
-               if (vif->type == NL80211_IFTYPE_AP) {
-                       struct iwl_mvm_vif *mvmvif =
-                               iwl_mvm_vif_from_mac80211(vif);
-
-                       mvmvif->ap_wep_key = kmemdup(key,
-                                                    sizeof(*key) + key->keylen,
-                                                    GFP_KERNEL);
-                       if (!mvmvif->ap_wep_key)
-                               return -ENOMEM;
-               }
-
-               if (vif->type != NL80211_IFTYPE_STATION)
-                       return 0;
-               break;
+               if (vif->type == NL80211_IFTYPE_STATION)
+                       break;
+               if (iwl_mvm_has_new_tx_api(mvm))
+                       return -EOPNOTSUPP;
+               /* support HW crypto on TX */
+               return 0;
        default:
                /* currently FW supports only one optional cipher scheme */
                if (hw->n_cipher_schemes &&
@@ -3540,12 +3508,17 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
                ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
                if (ret) {
                        IWL_WARN(mvm, "set key failed\n");
+                       key->hw_key_idx = STA_KEY_IDX_INVALID;
                        /*
                         * can't add key for RX, but we don't need it
-                        * in the device for TX so still return 0
+                        * in the device for TX so still return 0,
+                        * unless we have new TX API where we cannot
+                        * put key material into the TX_CMD
                         */
-                       key->hw_key_idx = STA_KEY_IDX_INVALID;
-                       ret = 0;
+                       if (iwl_mvm_has_new_tx_api(mvm))
+                               ret = -EOPNOTSUPP;
+                       else
+                               ret = 0;
                }
 
                break;
index bca6f6b536d9754133c9ac8ab00e271c1f2bb06f..a50dc53df08698ff0afafbef8f74692007afc17b 100644 (file)
@@ -498,7 +498,6 @@ struct iwl_mvm_vif {
        netdev_features_t features;
 
        struct iwl_probe_resp_data __rcu *probe_resp_data;
-       struct ieee80211_key_conf *ap_wep_key;
 };
 
 static inline struct iwl_mvm_vif *
index ba27dce4c2bbda399ce95865aeb3c9dda81e1adc..13681b03c10e15a4a42d9a7c598b91d80726abd7 100644 (file)
@@ -834,7 +834,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        mutex_lock(&mvm->mutex);
        iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
        err = iwl_run_init_mvm_ucode(mvm, true);
-       if (err)
+       if (err && err != -ERFKILL)
                iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
        if (!iwlmvm_mod_params.init_dbg || !err)
                iwl_mvm_stop_device(mvm);
index 1e03acf30762df6778335bda0d6acd60c11d01cb..b516fd1867ecf9f1cac26b540bd90c3c14c1e7e1 100644 (file)
@@ -169,9 +169,9 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
 }
 
 /* iwl_mvm_create_skb Adds the rxb to a new skb */
-static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
-                              u16 len, u8 crypt_len,
-                              struct iwl_rx_cmd_buffer *rxb)
+static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+                             struct ieee80211_hdr *hdr, u16 len, u8 crypt_len,
+                             struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
@@ -204,6 +204,20 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
         * present before copying packet data.
         */
        hdrlen += crypt_len;
+
+       if (WARN_ONCE(headlen < hdrlen,
+                     "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
+                     hdrlen, len, crypt_len)) {
+               /*
+                * We warn and trace because we want to be able to see
+                * it in trace-cmd as well.
+                */
+               IWL_DEBUG_RX(mvm,
+                            "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n",
+                            hdrlen, len, crypt_len);
+               return -EINVAL;
+       }
+
        skb_put_data(skb, hdr, hdrlen);
        skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
 
@@ -216,6 +230,8 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
                skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
                                fraglen, rxb->truesize);
        }
+
+       return 0;
 }
 
 static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
@@ -1671,7 +1687,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                        rx_status->boottime_ns = ktime_get_boot_ns();
        }
 
-       iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
+       if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) {
+               kfree_skb(skb);
+               goto out;
+       }
+
        if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
                iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue,
                                                sta, csi);
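
headlen - hdrlen is used as the length for the second skb_put_data(), so a malformed frame whose computed hdrlen exceeds headlen would underflow the subtraction and overrun the skb; the new WARN_ONCE rejects such frames and the caller frees the skb. The check boils down to this sketch:

#include <linux/bug.h>
#include <linux/errno.h>

static int example_check_lengths(unsigned int headlen, unsigned int hdrlen)
{
        /* headlen - hdrlen becomes a copy length; if it would
         * underflow, refuse the frame (the caller frees the skb). */
        if (WARN_ON_ONCE(headlen < hdrlen))
                return -EINVAL;
        return 0;
}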
index 498c315291cfac599bd23df37fad6c3e28541201..98d123dd7177845ff1676df2dbbdb2c492d89f6f 100644 (file)
@@ -8,7 +8,7 @@
  * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
  * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1399,7 +1399,9 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
 
                iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
                list_del_init(&mvmtxq->list);
+               local_bh_disable();
                iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+               local_bh_enable();
        }
 
        mutex_unlock(&mvm->mutex);
@@ -2333,21 +2335,6 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
                                   timeout);
 
-       if (mvmvif->ap_wep_key) {
-               u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
-
-               __set_bit(key_offset, mvm->fw_key_table);
-
-               if (key_offset == STA_KEY_IDX_INVALID)
-                       return -ENOSPC;
-
-               ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
-                                          mvmvif->ap_wep_key, true, 0, NULL, 0,
-                                          key_offset, 0);
-               if (ret)
-                       return ret;
-       }
-
        return 0;
 }
 
@@ -2419,28 +2406,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
 
-       if (mvmvif->ap_wep_key) {
-               int i;
-
-               if (!__test_and_clear_bit(mvmvif->ap_wep_key->hw_key_idx,
-                                         mvm->fw_key_table)) {
-                       IWL_ERR(mvm, "offset %d not used in fw key table.\n",
-                               mvmvif->ap_wep_key->hw_key_idx);
-                       return -ENOENT;
-               }
-
-               /* track which key was deleted last */
-               for (i = 0; i < STA_KEY_MAX_NUM; i++) {
-                       if (mvm->fw_key_deleted[i] < U8_MAX)
-                               mvm->fw_key_deleted[i]++;
-               }
-               mvm->fw_key_deleted[mvmvif->ap_wep_key->hw_key_idx] = 0;
-               ret = __iwl_mvm_remove_sta_key(mvm, mvmvif->mcast_sta.sta_id,
-                                              mvmvif->ap_wep_key, true);
-               if (ret)
-                       return ret;
-       }
-
        ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
        if (ret)
                IWL_WARN(mvm, "Failed sending remove station\n");
index 79700c7310a1a3cf38162d6ed3c582fe7c6ed67c..b4d4071b865db90dc81fd8c2db7d410b66686f30 100644 (file)
@@ -8,7 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -394,7 +394,6 @@ struct iwl_mvm_rxq_dup_data {
  *     the BA window. To be used for UAPSD only.
  * @ptk_pn: per-queue PTK PN data structures
  * @dup_data: per queue duplicate packet detection data
- * @wep_key: used in AP mode. Is a duplicate of the WEP key.
  * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
  * @tx_ant: the index of the antenna to use for data tx to this station. Only
  *     used during connection establishment (e.g. for the 4 way handshake
@@ -426,8 +425,6 @@ struct iwl_mvm_sta {
        struct iwl_mvm_key_pn __rcu *ptk_pn[4];
        struct iwl_mvm_rxq_dup_data *dup_data;
 
-       struct ieee80211_key_conf *wep_key;
-
        u8 reserved_queue;
 
        /* Temporary, until the new TLC will control the Tx protection */
index 2b94e4cef56cfc5fd25a0343f189116ca0e78c96..9f1af8da9dc181eb1dcc48b2f0fb1d1b7ffa9836 100644 (file)
@@ -953,14 +953,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
        {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)},
 
-       {IWL_PCI_DEVICE(0x2723, 0x0080, iwl22260_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x0084, iwl22260_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x0088, iwl22260_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x008C, iwl22260_2ax_cfg)},
+       {IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)},
+       {IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)},
+       {IWL_PCI_DEVICE(0x2723, 0x0088, iwl_ax200_cfg_cc)},
+       {IWL_PCI_DEVICE(0x2723, 0x008C, iwl_ax200_cfg_cc)},
        {IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)},
        {IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x4080, iwl22260_2ax_cfg)},
-       {IWL_PCI_DEVICE(0x2723, 0x4088, iwl22260_2ax_cfg)},
+       {IWL_PCI_DEVICE(0x2723, 0x2080, iwl_ax200_cfg_cc)},
+       {IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)},
+       {IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)},
 
        {IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)},
        {IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)},
index bf8b61a476c5b017fac5a94e6cd7eb894116d169..59213164f35e3814cd0d7618cf8f6f54fd873f5b 100644 (file)
@@ -1043,7 +1043,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
 
 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
 void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
-void iwl_trans_sync_nmi(struct iwl_trans *trans);
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
index fe8269d023def832e97701bc7c1bd0c57df05a96..c4375b868901d092cc76c9b31d3d0b5f96b4f044 100644 (file)
@@ -3318,7 +3318,8 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
        .unref = iwl_trans_pcie_unref,                                  \
        .dump_data = iwl_trans_pcie_dump_data,                          \
        .d3_suspend = iwl_trans_pcie_d3_suspend,                        \
-       .d3_resume = iwl_trans_pcie_d3_resume
+       .d3_resume = iwl_trans_pcie_d3_resume,                          \
+       .sync_nmi = iwl_trans_pcie_sync_nmi
 
 #ifdef CONFIG_PM_SLEEP
 #define IWL_TRANS_PM_OPS                                               \
@@ -3542,6 +3543,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                }
        } else if (cfg == &iwl_ax101_cfg_qu_hr) {
                if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+                   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+                   trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
+                       trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+               } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
                    CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
                        trans->cfg = &iwl_ax101_cfg_qu_hr;
                } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
@@ -3560,7 +3565,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                }
        } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
                   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
-                  (trans->cfg != &iwl22260_2ax_cfg ||
+                  (trans->cfg != &iwl_ax200_cfg_cc ||
                    trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
                u32 hw_status;
 
@@ -3637,22 +3642,29 @@ out_no_pci:
        return ERR_PTR(ret);
 }
 
-void iwl_trans_sync_nmi(struct iwl_trans *trans)
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
 {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
+       u32 inta_addr, sw_err_bit;
+
+       if (trans_pcie->msix_enabled) {
+               inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
+               sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
+       } else {
+               inta_addr = CSR_INT;
+               sw_err_bit = CSR_INT_BIT_SW_ERR;
+       }
 
        iwl_disable_interrupts(trans);
        iwl_force_nmi(trans);
        while (time_after(timeout, jiffies)) {
-               u32 inta_hw = iwl_read32(trans,
-                                        CSR_MSIX_HW_INT_CAUSES_AD);
+               u32 inta_hw = iwl_read32(trans, inta_addr);
 
                /* Error detected by uCode */
-               if (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) {
+               if (inta_hw & sw_err_bit) {
                        /* Clear causes register */
-                       iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD,
-                                   inta_hw &
-                                   MSIX_HW_INT_CAUSES_REG_SW_ERR);
+                       iwl_write32(trans, inta_addr, inta_hw & sw_err_bit);
                        break;
                }
 
index 88530d9f4a54ced4e6c8d081cedaf7b0354cde8b..38d11033898716b3e9c1c5fae581c692d4ae44fe 100644 (file)
@@ -965,7 +965,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
                               cmd_str);
                ret = -ETIMEDOUT;
 
-               iwl_trans_sync_nmi(trans);
+               iwl_trans_pcie_sync_nmi(trans);
                goto cancel;
        }
 
index 9fbd37d23e851caf0042ef2861263815893969cb..7be73e2c4681cadc48ed5a838068d419196b413f 100644 (file)
@@ -1960,7 +1960,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
                               iwl_get_cmd_string(trans, cmd->id));
                ret = -ETIMEDOUT;
 
-               iwl_trans_sync_nmi(trans);
+               iwl_trans_pcie_sync_nmi(trans);
                goto cancel;
        }
 
index 2a407a71bcd14a7c88fe67df51fcc3d1ee4b8ae8..c71adb1f1f4170fa4638abd80011360061b72192 100644 (file)
@@ -2640,7 +2640,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
        enum nl80211_band band;
        const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
        struct net *net;
-       int idx;
+       int idx, i;
        int n_limits = 0;
 
        if (WARN_ON(param->channels > 1 && !param->use_chanctx))
@@ -2764,12 +2764,23 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                goto failed_hw;
        }
 
+       data->if_combination.max_interfaces = 0;
+       for (i = 0; i < n_limits; i++)
+               data->if_combination.max_interfaces +=
+                       data->if_limits[i].max;
+
        data->if_combination.n_limits = n_limits;
-       data->if_combination.max_interfaces = 2048;
        data->if_combination.limits = data->if_limits;
 
-       hw->wiphy->iface_combinations = &data->if_combination;
-       hw->wiphy->n_iface_combinations = 1;
+       /*
+        * If we actually were asked to support combinations,
+        * advertise them - if there's only a single thing like
+        * only IBSS then don't advertise it as combinations.
+        */
+       if (data->if_combination.max_interfaces > 1) {
+               hw->wiphy->iface_combinations = &data->if_combination;
+               hw->wiphy->n_iface_combinations = 1;
+       }
 
        if (param->ciphers) {
                memcpy(data->ciphers, param->ciphers,
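
Instead of a blanket max_interfaces of 2048, the hwsim hunk sums the per-type limits so the advertised ceiling matches what the limits actually permit, and skips advertising combinations entirely when only a single interface is possible. The summation, as a sketch:

#include <net/mac80211.h>

/* Derive the ceiling from the limits themselves so the advertised
 * max_interfaces always matches what the limits can actually allow. */
static u32 example_sum_limits(const struct ieee80211_iface_limit *limits,
                              int n_limits)
{
        u32 total = 0;
        int i;

        for (i = 0; i < n_limits; i++)
                total += limits[i].max;

        return total;
}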
index a85648342d15bcd3b906624bc1c2b15df2c08b6f..d5a70340a9457fda1f80efb38cec2fefdf0874f9 100644 (file)
@@ -181,7 +181,7 @@ static int mwifiex_sdio_resume(struct device *dev)
 
        adapter = card->adapter;
 
-       if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
+       if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
                mwifiex_dbg(adapter, WARN,
                            "device already resumed\n");
                return 0;
index 6eedc0ec76616cc55afec8b98f55db1ec27e540b..76629b98c78d78d81d7c61e0cd4771ba8d5f3c39 100644 (file)
@@ -130,6 +130,8 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 static void
 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
 {
+       iowrite32(q->desc_dma, &q->regs->desc_base);
+       iowrite32(q->ndesc, &q->regs->ring_size);
        q->head = ioread32(&q->regs->dma_idx);
        q->tail = q->head;
        iowrite32(q->head, &q->regs->cpu_idx);
@@ -180,7 +182,10 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
        else
                mt76_dma_sync_idx(dev, q);
 
-       wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+       wake = wake && q->stopped &&
+              qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+       if (wake)
+               q->stopped = false;
 
        if (!q->queued)
                wake_up(&dev->tx_wait);
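
The q->stopped flag added here (and used the same way in the mt76_tx() and
mt76u_tx_tasklet() hunks further down) is plain hysteresis: stop a queue once
fewer than eight descriptors remain free, and wake only a queue that was
actually stopped. A standalone model, with the threshold kept as in the driver:

#include <assert.h>
#include <stdbool.h>

struct queue {
	int queued, ndesc;
	bool stopped;
};

/* Returns true when the caller should stop the mac80211 queue. */
static bool tx_should_stop(struct queue *q)
{
	if (q->queued > q->ndesc - 8 && !q->stopped) {
		q->stopped = true;
		return true;
	}
	return false;
}

/* Returns true when the caller should wake the mac80211 queue. */
static bool tx_should_wake(struct queue *q)
{
	if (q->stopped && q->queued < q->ndesc - 8) {
		q->stopped = false;
		return true;
	}
	return false;
}

int main(void)
{
	struct queue q = { .queued = 121, .ndesc = 128 };

	assert(tx_should_stop(&q));	/* above the high-water mark */
	q.queued = 100;
	assert(tx_should_wake(&q));
	assert(!tx_should_wake(&q));	/* no duplicate wake */
	return 0;
}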
index a033745adb2f7a738ac576aafd41aa931fcc5ea6..316167404729fdcd8322c71bd5626785822ce90d 100644 (file)
@@ -679,19 +679,15 @@ out:
        return ret;
 }
 
-static void
-mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
-               struct ieee80211_sta *sta)
+void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta)
 {
        struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
-       int idx = wcid->idx;
-       int i;
+       int i, idx = wcid->idx;
 
        rcu_assign_pointer(dev->wcid[idx], NULL);
        synchronize_rcu();
 
-       mutex_lock(&dev->mutex);
-
        if (dev->drv->sta_remove)
                dev->drv->sta_remove(dev, vif, sta);
 
@@ -699,7 +695,15 @@ mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
        for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
                mt76_txq_remove(dev, sta->txq[i]);
        mt76_wcid_free(dev->wcid_mask, idx);
+}
+EXPORT_SYMBOL_GPL(__mt76_sta_remove);
 
+static void
+mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+               struct ieee80211_sta *sta)
+{
+       mutex_lock(&dev->mutex);
+       __mt76_sta_remove(dev, vif, sta);
        mutex_unlock(&dev->mutex);
 }
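
The split above follows the usual kernel naming convention: the
double-underscore variant assumes the caller already holds dev->mutex (the
mt76x02 reset path in a later hunk relies on exactly that), while the plain
wrapper takes and releases the lock itself. A userspace model of the
convention, using pthreads in place of the kernel mutex:

#include <pthread.h>
#include <stdio.h>

struct obj {
	pthread_mutex_t lock;
	int registered;
};

/* Caller must hold o->lock - the __ prefix marks that contract. */
static void __obj_remove(struct obj *o)
{
	o->registered = 0;
}

static void obj_remove(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	__obj_remove(o);
	pthread_mutex_unlock(&o->lock);
}

int main(void)
{
	struct obj o = { PTHREAD_MUTEX_INITIALIZER, 1 };

	obj_remove(&o);
	printf("registered=%d\n", o.registered);
	return 0;
}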
 
index 5dfb0601f1015c01251d409070ba64bda516db26..bcbfd3c4a44b68199dbc53e0e5ea498cc9e78e89 100644 (file)
@@ -126,6 +126,7 @@ struct mt76_queue {
        int ndesc;
        int queued;
        int buf_size;
+       bool stopped;
 
        u8 buf_offset;
        u8 hw_idx;
@@ -143,6 +144,7 @@ struct mt76_mcu_ops {
                         const struct mt76_reg_pair *rp, int len);
        int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
                         struct mt76_reg_pair *rp, int len);
+       int (*mcu_restart)(struct mt76_dev *dev);
 };
 
 struct mt76_queue_ops {
@@ -693,6 +695,8 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                   struct ieee80211_sta *sta,
                   enum ieee80211_sta_state old_state,
                   enum ieee80211_sta_state new_state);
+void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta);
 
 struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);
 
index afcd86f735b40e1a508d7fba5c4d38429906c9af..4dcb465095d19e9a0fe88c15a757a59fec802d87 100644 (file)
@@ -135,8 +135,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
 
 out:
        mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
-       if (dev->mt76.q_tx[MT_TXQ_BEACON].queued >
-           __sw_hweight8(dev->beacon_mask))
+       if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > hweight8(dev->beacon_mask))
                dev->beacon_check++;
 }
 
index d69e82c66ab29fb8e8c645de9270ab47a066dd49..b3ae0aaea62a15b51b1ed2c23bc8857f88dec8c9 100644 (file)
@@ -27,12 +27,16 @@ static void
 mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
 {
        __le32 *txd = (__le32 *)skb->data;
+       struct ieee80211_hdr *hdr;
+       struct ieee80211_sta *sta;
        struct mt7603_sta *msta;
        struct mt76_wcid *wcid;
+       void *priv;
        int idx;
        u32 val;
+       u8 tid;
 
-       if (skb->len < sizeof(MT_TXD_SIZE) + sizeof(struct ieee80211_hdr))
+       if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
                goto free;
 
        val = le32_to_cpu(txd[1]);
@@ -46,10 +50,19 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
        if (!wcid)
                goto free;
 
-       msta = container_of(wcid, struct mt7603_sta, wcid);
+       priv = msta = container_of(wcid, struct mt7603_sta, wcid);
        val = le32_to_cpu(txd[0]);
        skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val));
 
+       val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
+       val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
+       txd[0] = cpu_to_le32(val);
+
+       sta = container_of(priv, struct ieee80211_sta, drv_priv);
+       hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE];
+       tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+       ieee80211_sta_set_buffered(sta, tid, true);
+
        spin_lock_bh(&dev->ps_lock);
        __skb_queue_tail(&msta->psq, skb);
        if (skb_queue_len(&msta->psq) >= 64) {
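
The priv/container_of dance above recovers an outer structure from a pointer
to an embedded member twice over: wcid to mt7603_sta, then drv_priv to
ieee80211_sta. A self-contained model of container_of (simplified - the
kernel version adds a type check):

#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct wcid { int idx; };
struct sta { int id; struct wcid wcid; };	/* wcid embedded */

int main(void)
{
	struct sta s = { .id = 7, .wcid = { .idx = 3 } };
	struct wcid *w = &s.wcid;

	/* From the member pointer back to its enclosing structure. */
	struct sta *back = container_of(w, struct sta, wcid);

	assert(back == &s && back->id == 7);
	return 0;
}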
index 15cc8f33b34d656d86c745cd10950d2253e864fc..3af45949e868909e3073335cc302411c5e6c9761 100644 (file)
@@ -112,7 +112,7 @@ static void
 mt7603_phy_init(struct mt7603_dev *dev)
 {
        int rx_chains = dev->mt76.antenna_mask;
-       int tx_chains = __sw_hweight8(rx_chains) - 1;
+       int tx_chains = hweight8(rx_chains) - 1;
 
        mt76_rmw(dev, MT_WF_RMAC_RMCR,
                 (MT_WF_RMAC_RMCR_SMPS_MODE |
@@ -510,6 +510,8 @@ int mt7603_register_device(struct mt7603_dev *dev)
        bus_ops->rmw = mt7603_rmw;
        dev->mt76.bus = bus_ops;
 
+       spin_lock_init(&dev->ps_lock);
+
        INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work);
        tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
                     (unsigned long)dev);
index 0a0115861b51e500c777daeb805685286ebb4f5a..5abc02b578185a6467571f549987dd147e2b3d3b 100644 (file)
@@ -343,7 +343,7 @@ void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
                 MT_BA_CONTROL_1_RESET));
 }
 
-void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
                            int ba_size)
 {
        u32 addr = mt7603_wtbl2_addr(wcid);
@@ -358,43 +358,6 @@ void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
                mt76_clear(dev, addr + (15 * 4), tid_mask);
                return;
        }
-       mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
-
-       mt7603_mac_stop(dev);
-       switch (tid) {
-       case 0:
-               mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID0_SN, ssn);
-               break;
-       case 1:
-               mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID1_SN, ssn);
-               break;
-       case 2:
-               mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID2_SN_LO,
-                              ssn);
-               mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID2_SN_HI,
-                              ssn >> 8);
-               break;
-       case 3:
-               mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID3_SN, ssn);
-               break;
-       case 4:
-               mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID4_SN, ssn);
-               break;
-       case 5:
-               mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID5_SN_LO,
-                              ssn);
-               mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID5_SN_HI,
-                              ssn >> 4);
-               break;
-       case 6:
-               mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID6_SN, ssn);
-               break;
-       case 7:
-               mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID7_SN, ssn);
-               break;
-       }
-       mt7603_wtbl_update(dev, wcid, MT_WTBL_UPDATE_WTBL2);
-       mt7603_mac_start(dev);
 
        for (i = 7; i > 0; i--) {
                if (ba_size >= MT_AGG_SIZE_LIMIT(i))
@@ -827,6 +790,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_tx_rate *rate = &info->control.rates[0];
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
        struct ieee80211_vif *vif = info->control.vif;
        struct mt7603_vif *mvif;
        int wlan_idx;
@@ -834,6 +798,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
        int tx_count = 8;
        u8 frame_type, frame_subtype;
        u16 fc = le16_to_cpu(hdr->frame_control);
+       u16 seqno = 0;
        u8 vif_idx = 0;
        u32 val;
        u8 bw;
@@ -919,7 +884,17 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
                tx_count = 0x1f;
 
        val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
-             FIELD_PREP(MT_TXD3_SEQ, le16_to_cpu(hdr->seq_ctrl));
+                 MT_TXD3_SN_VALID;
+
+       if (ieee80211_is_data_qos(hdr->frame_control))
+               seqno = le16_to_cpu(hdr->seq_ctrl);
+       else if (ieee80211_is_back_req(hdr->frame_control))
+               seqno = le16_to_cpu(bar->start_seq_num);
+       else
+               val &= ~MT_TXD3_SN_VALID;
+
+       val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4);
+
        txwi[3] = cpu_to_le32(val);
 
        if (key) {
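
The seqno >> 4 above is where the 802.11 Sequence Control layout comes in:
the low four bits carry the fragment number and the upper twelve the sequence
number, and the hardware field takes only the latter. The same holds for a
BAR's start_seq_num. Numerically:

#include <assert.h>
#include <stdint.h>

/* Sequence Control: [15:4] sequence number, [3:0] fragment number. */
static uint16_t sn_from_seq_ctrl(uint16_t seq_ctrl)
{
	return seq_ctrl >> 4;
}

int main(void)
{
	uint16_t seq_ctrl = (1234u << 4) | 2;	/* SN 1234, fragment 2 */

	assert(sn_from_seq_ctrl(seq_ctrl) == 1234);
	return 0;
}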
@@ -1072,7 +1047,7 @@ out:
        case MT_PHY_TYPE_HT:
                final_rate_flags |= IEEE80211_TX_RC_MCS;
                final_rate &= GENMASK(5, 0);
-               if (i > 15)
+               if (final_rate > 15)
                        return false;
                break;
        default:
index b10775ed92e65ff72036dc314c061437315c009c..a3c4ef198bfeea965fb3f8d71e9d622cc546bb1a 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/pci.h>
 #include <linux/module.h>
 #include "mt7603.h"
+#include "mac.h"
 #include "eeprom.h"
 
 static int
@@ -371,7 +372,7 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
        struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
        struct sk_buff_head list;
 
-       mt76_stop_tx_queues(&dev->mt76, sta, false);
+       mt76_stop_tx_queues(&dev->mt76, sta, true);
        mt7603_wtbl_set_ps(dev, msta, ps);
        if (ps)
                return;
@@ -385,6 +386,15 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
        mt7603_ps_tx_list(dev, &list);
 }
 
+static void
+mt7603_ps_set_more_data(struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr;
+
+       hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE];
+       hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+}
+
 static void
 mt7603_release_buffered_frames(struct ieee80211_hw *hw,
                               struct ieee80211_sta *sta,
@@ -399,6 +409,8 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw,
 
        __skb_queue_head_init(&list);
 
+       mt7603_wtbl_set_ps(dev, msta, false);
+
        spin_lock_bh(&dev->ps_lock);
        skb_queue_walk_safe(&msta->psq, skb, tmp) {
                if (!nframes)
@@ -409,11 +421,15 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw,
 
                skb_set_queue_mapping(skb, MT_TXQ_PSD);
                __skb_unlink(skb, &msta->psq);
+               mt7603_ps_set_more_data(skb);
                __skb_queue_tail(&list, skb);
                nframes--;
        }
        spin_unlock_bh(&dev->ps_lock);
 
+       if (!skb_queue_empty(&list))
+               ieee80211_sta_eosp(sta);
+
        mt7603_ps_tx_list(dev, &list);
 
        if (nframes)
@@ -568,13 +584,13 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        case IEEE80211_AMPDU_TX_OPERATIONAL:
                mtxq->aggr = true;
                mtxq->send_bar = false;
-               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, ba_size);
+               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, ba_size);
                break;
        case IEEE80211_AMPDU_TX_STOP_FLUSH:
        case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
                mtxq->aggr = false;
                ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
-               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
+               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
                break;
        case IEEE80211_AMPDU_TX_START:
                mtxq->agg_ssn = *ssn << 4;
@@ -582,7 +598,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                break;
        case IEEE80211_AMPDU_TX_STOP_CONT:
                mtxq->aggr = false;
-               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
+               mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                break;
        }
index 4b0713f1fd5e3da78c83efa05b8f14aabd2fbfc4..d06905ea8cc63f2b9851eb679655007c1e8f7486 100644 (file)
@@ -433,7 +433,7 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
 {
        struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
        struct ieee80211_hw *hw = mt76_hw(dev);
-       int n_chains = __sw_hweight8(dev->mt76.antenna_mask);
+       int n_chains = hweight8(dev->mt76.antenna_mask);
        struct {
                u8 control_chan;
                u8 center_chan;
index 79f3324294328b0f5b842a98ea23115b96470ca2..6049f3b7c8fec429de86329d35662c4659f711ee 100644 (file)
@@ -200,7 +200,7 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval);
 int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb);
 void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data);
 void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid);
-void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
                            int ba_size);
 
 void mt7603_pse_client_reset(struct mt7603_dev *dev);
index e13fea80d970d228ef7b99faa69e489f017ffb49..b920be1f5718b75d2f7374b6bbe6a533ebdec9c2 100644 (file)
@@ -23,9 +23,9 @@ mt76_wmac_probe(struct platform_device *pdev)
        }
 
        mem_base = devm_ioremap_resource(&pdev->dev, res);
-       if (!mem_base) {
+       if (IS_ERR(mem_base)) {
                dev_err(&pdev->dev, "Failed to get memory resource\n");
-               return -EINVAL;
+               return PTR_ERR(mem_base);
        }
 
        mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
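
Background for the fix above: devm_ioremap_resource() never returns NULL -
failures come back as an error code encoded into the pointer value, so
IS_ERR()/PTR_ERR() is the correct check. The encoding reserves the top 4095
values of the address space, roughly as in this userspace restatement of the
err.h helpers:

#include <assert.h>

#define MAX_ERRNO 4095

static inline int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static inline long ptr_err(const void *ptr)
{
	return (long)ptr;
}

int main(void)
{
	void *p = (void *)(long)-22;	/* ERR_PTR(-EINVAL) */

	assert(is_err(p) && ptr_err(p) == -22);
	return 0;
}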
index 0290ba5869a5182ca283db62629d38a2e37b21ef..736f81752b5b488518e1393e6200b67be7bfa87c 100644 (file)
@@ -46,7 +46,7 @@ static const struct mt76_reg_pair common_mac_reg_table[] = {
        { MT_MM20_PROT_CFG,             0x01742004 },
        { MT_MM40_PROT_CFG,             0x03f42084 },
        { MT_TXOP_CTRL_CFG,             0x0000583f },
-       { MT_TX_RTS_CFG,                0x00092b20 },
+       { MT_TX_RTS_CFG,                0x00ffff20 },
        { MT_EXP_ACK_TIME,              0x002400ca },
        { MT_TXOP_HLDR_ET,              0x00000002 },
        { MT_XIFS_TIME_CFG,             0x33a41010 },
index 91718647da0285e40eda86aa97dc98559592732d..e5a06f74a6f701419f703844f4e73fba6307a627 100644 (file)
@@ -229,7 +229,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
        struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
        struct mt76x02_dev *dev;
        struct mt76_dev *mdev;
-       u32 asic_rev, mac_rev;
+       u32 mac_rev;
        int ret;
 
        mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
@@ -262,10 +262,14 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
                goto err;
        }
 
-       asic_rev = mt76_rr(dev, MT_ASIC_VERSION);
+       mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
        mac_rev = mt76_rr(dev, MT_MAC_CSR0);
        dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n",
-                asic_rev, mac_rev);
+                mdev->rev, mac_rev);
+       if (!is_mt76x0(dev)) {
+               ret = -ENODEV;
+               goto err;
+       }
 
        /* Note: vendor driver skips this check for MT76X0U */
        if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
index 6915cce5def9342935784c888c477417e26b5c69..07061eb4d1e1b3ef97b7131791af79a0962b6360 100644 (file)
@@ -51,6 +51,7 @@ struct mt76x02_calibration {
        u16 false_cca;
        s8 avg_rssi_all;
        s8 agc_gain_adjust;
+       s8 agc_lowest_gain;
        s8 low_gain;
 
        s8 temp_vco;
@@ -114,8 +115,11 @@ struct mt76x02_dev {
        struct mt76x02_dfs_pattern_detector dfs_pd;
 
        /* edcca monitor */
+       unsigned long ed_trigger_timeout;
        bool ed_tx_blocked;
        bool ed_monitor;
+       u8 ed_monitor_enabled;
+       u8 ed_monitor_learning;
        u8 ed_trigger;
        u8 ed_silent;
        ktime_t ed_time;
@@ -188,6 +192,13 @@ void mt76x02_mac_start(struct mt76x02_dev *dev);
 
 void mt76x02_init_debugfs(struct mt76x02_dev *dev);
 
+static inline bool is_mt76x0(struct mt76x02_dev *dev)
+{
+       return mt76_chip(&dev->mt76) == 0x7610 ||
+              mt76_chip(&dev->mt76) == 0x7630 ||
+              mt76_chip(&dev->mt76) == 0x7650;
+}
+
 static inline bool is_mt76x2(struct mt76x02_dev *dev)
 {
        return mt76_chip(&dev->mt76) == 0x7612 ||
index 7580c5c986ffe5226f4c91feccaf73cd445aabc0..b1d6fd4861e3236b9cb255277c01b9a2f01b1fca 100644 (file)
@@ -116,6 +116,32 @@ static int read_agc(struct seq_file *file, void *data)
        return 0;
 }
 
+static int
+mt76_edcca_set(void *data, u64 val)
+{
+       struct mt76x02_dev *dev = data;
+       enum nl80211_dfs_regions region = dev->dfs_pd.region;
+
+       dev->ed_monitor_enabled = !!val;
+       dev->ed_monitor = dev->ed_monitor_enabled &&
+                         region == NL80211_DFS_ETSI;
+       mt76x02_edcca_init(dev, true);
+
+       return 0;
+}
+
+static int
+mt76_edcca_get(void *data, u64 *val)
+{
+       struct mt76x02_dev *dev = data;
+
+       *val = dev->ed_monitor_enabled;
+       return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt76_edcca_get, mt76_edcca_set,
+                        "%lld\n");
+
 void mt76x02_init_debugfs(struct mt76x02_dev *dev)
 {
        struct dentry *dir;
@@ -127,6 +153,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
        debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp);
        debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);
 
+       debugfs_create_file("edcca", 0400, dir, dev, &fops_edcca);
        debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
        debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat);
        debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
index e4649103efd49ab5d77c28f3f01d1aac1c81fc3b..17d12d212d1ba1d0a3eb8f65b9e0ecef42908406 100644 (file)
@@ -885,7 +885,8 @@ mt76x02_dfs_set_domain(struct mt76x02_dev *dev,
        if (dfs_pd->region != region) {
                tasklet_disable(&dfs_pd->dfs_tasklet);
 
-               dev->ed_monitor = region == NL80211_DFS_ETSI;
+               dev->ed_monitor = dev->ed_monitor_enabled &&
+                                 region == NL80211_DFS_ETSI;
                mt76x02_edcca_init(dev, true);
 
                dfs_pd->region = region;
index 91ff6598eccfb55dfb0fe8e96b4edf6e7941f59f..4fe5a83ca5a41713d894a4210fe5ef0d68e47e17 100644 (file)
@@ -67,12 +67,39 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
 }
 EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
 
+void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
+                             struct ieee80211_key_conf *key)
+{
+       enum mt76x02_cipher_type cipher;
+       u8 key_data[32];
+       u32 iv, eiv;
+       u64 pn;
+
+       cipher = mt76x02_mac_get_key_info(key, key_data);
+       iv = mt76_rr(dev, MT_WCID_IV(idx));
+       eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);
+
+       pn = (u64)eiv << 16;
+       if (cipher == MT_CIPHER_TKIP) {
+               pn |= (iv >> 16) & 0xff;
+               pn |= (iv & 0xff) << 8;
+       } else if (cipher >= MT_CIPHER_AES_CCMP) {
+               pn |= iv & 0xffff;
+       } else {
+               return;
+       }
+
+       atomic64_set(&key->tx_pn, pn);
+}
+
 int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
                             struct ieee80211_key_conf *key)
 {
        enum mt76x02_cipher_type cipher;
        u8 key_data[32];
        u8 iv_data[8];
+       u64 pn;
 
        cipher = mt76x02_mac_get_key_info(key, key_data);
        if (cipher == MT_CIPHER_NONE && key)
@@ -85,9 +112,22 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
        if (key) {
                mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
                               !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+
+               pn = atomic64_read(&key->tx_pn);
+
                iv_data[3] = key->keyidx << 6;
-               if (cipher >= MT_CIPHER_TKIP)
+               if (cipher >= MT_CIPHER_TKIP) {
                        iv_data[3] |= 0x20;
+                       put_unaligned_le32(pn >> 16, &iv_data[4]);
+               }
+
+               if (cipher == MT_CIPHER_TKIP) {
+                       iv_data[0] = (pn >> 8) & 0xff;
+                       iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
+                       iv_data[2] = pn & 0xff;
+               } else if (cipher >= MT_CIPHER_AES_CCMP) {
+                       put_unaligned_le16((pn & 0xffff), &iv_data[0]);
+               }
        }
 
        mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
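
The two hunks above round-trip the 48-bit CCMP packet number through the WCID
IV/EIV registers: the low 16 bits live in the IV word and the upper 32 in the
EIV word (TKIP uses a different byte layout, handled separately in the hunk).
A standalone check of that packing, mirroring the driver's arithmetic:

#include <assert.h>
#include <stdint.h>

/* CCMP read side from the hunk: pn = (eiv << 16) | (iv & 0xffff). */
static uint64_t pn_from_regs(uint32_t iv, uint32_t eiv)
{
	return ((uint64_t)eiv << 16) | (iv & 0xffff);
}

/* CCMP write side (key index bits in the IV are omitted here). */
static void regs_from_pn(uint64_t pn, uint32_t *iv, uint32_t *eiv)
{
	*iv = pn & 0xffff;
	*eiv = pn >> 16;
}

int main(void)
{
	uint32_t iv, eiv;
	uint64_t pn = 0x123456789abcULL;

	regs_from_pn(pn, &iv, &eiv);
	assert(pn_from_regs(iv, eiv) == pn);
	return 0;
}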
@@ -426,7 +466,6 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
                return;
 
        rcu_read_lock();
-       mt76_tx_status_lock(mdev, &list);
 
        if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
                wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
@@ -439,6 +478,8 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
                                          drv_priv);
        }
 
+       mt76_tx_status_lock(mdev, &list);
+
        if (wcid) {
                if (stat->pktid >= MT_PACKET_ID_FIRST)
                        status.skb = mt76_tx_status_skb_get(mdev, wcid,
@@ -458,7 +499,9 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
                if (*update == 0 && stat_val == stat_cache &&
                    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
                        msta->n_frames++;
-                       goto out;
+                       mt76_tx_status_unlock(mdev, &list);
+                       rcu_read_unlock();
+                       return;
                }
 
                mt76x02_mac_fill_tx_status(dev, status.info, &msta->status,
@@ -474,11 +517,10 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
 
        if (status.skb)
                mt76_tx_status_skb_done(mdev, status.skb, &list);
-       else
-               ieee80211_tx_status_ext(mt76_hw(dev), &status);
-
-out:
        mt76_tx_status_unlock(mdev, &list);
+
+       if (!status.skb)
+               ieee80211_tx_status_ext(mt76_hw(dev), &status);
        rcu_read_unlock();
 }
 
@@ -920,6 +962,7 @@ void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable)
                }
        }
        mt76x02_edcca_tx_enable(dev, true);
+       dev->ed_monitor_learning = true;
 
        /* clear previous CCA timer value */
        mt76_rr(dev, MT_ED_CCA_TIMER);
@@ -929,6 +972,10 @@ EXPORT_SYMBOL_GPL(mt76x02_edcca_init);
 
 #define MT_EDCCA_TH            92
 #define MT_EDCCA_BLOCK_TH      2
+#define MT_EDCCA_LEARN_TH      50
+#define MT_EDCCA_LEARN_CCA     180
+#define MT_EDCCA_LEARN_TIMEOUT (20 * HZ)
+
 static void mt76x02_edcca_check(struct mt76x02_dev *dev)
 {
        ktime_t cur_time;
@@ -951,11 +998,23 @@ static void mt76x02_edcca_check(struct mt76x02_dev *dev)
                dev->ed_trigger = 0;
        }
 
-       if (dev->ed_trigger > MT_EDCCA_BLOCK_TH &&
-           !dev->ed_tx_blocked)
+       if (dev->cal.agc_lowest_gain &&
+           dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
+           dev->ed_trigger > MT_EDCCA_LEARN_TH) {
+               dev->ed_monitor_learning = false;
+               dev->ed_trigger_timeout = jiffies + 20 * HZ;
+       } else if (!dev->ed_monitor_learning &&
+                  time_is_after_jiffies(dev->ed_trigger_timeout)) {
+               dev->ed_monitor_learning = true;
+               mt76x02_edcca_tx_enable(dev, true);
+       }
+
+       if (dev->ed_monitor_learning)
+               return;
+
+       if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
                mt76x02_edcca_tx_enable(dev, false);
-       else if (dev->ed_silent > MT_EDCCA_BLOCK_TH &&
-                dev->ed_tx_blocked)
+       else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
                mt76x02_edcca_tx_enable(dev, true);
 }
 
index 6b1f25d2f64c3a931fbf1bd4c695984f9d5bf4ff..caeeef96c42faf74ccf9bb15cc60f4ffe865e9f4 100644 (file)
@@ -177,6 +177,8 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
                                 u8 key_idx, struct ieee80211_key_conf *key);
 int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
                             struct ieee80211_key_conf *key);
+void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
+                             struct ieee80211_key_conf *key);
 void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx,
                            u8 *mac);
 void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop);
index 1229f19f2b02c68b4144662e8097333d5133f3ac..daaed1220147ea914c32f13f32f04bd5a360d7fb 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/irq.h>
 
 #include "mt76x02.h"
+#include "mt76x02_mcu.h"
 #include "mt76x02_trace.h"
 
 struct beacon_bc_data {
@@ -418,9 +419,66 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
        return i < 4;
 }
 
+static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                            struct ieee80211_sta *sta,
+                            struct ieee80211_key_conf *key, void *data)
+{
+       struct mt76x02_dev *dev = hw->priv;
+       struct mt76_wcid *wcid;
+
+       if (!sta)
+           return;
+
+       wcid = (struct mt76_wcid *) sta->drv_priv;
+
+       if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
+           return;
+
+       mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
+}
+
+static void mt76x02_reset_state(struct mt76x02_dev *dev)
+{
+       int i;
+
+       lockdep_assert_held(&dev->mt76.mutex);
+
+       clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+       rcu_read_lock();
+       ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
+       rcu_read_unlock();
+
+       for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
+               struct ieee80211_sta *sta;
+               struct ieee80211_vif *vif;
+               struct mt76x02_sta *msta;
+               struct mt76_wcid *wcid;
+               void *priv;
+
+               wcid = rcu_dereference_protected(dev->mt76.wcid[i],
+                                       lockdep_is_held(&dev->mt76.mutex));
+               if (!wcid)
+                       continue;
+
+               priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
+               sta = container_of(priv, struct ieee80211_sta, drv_priv);
+
+               priv = msta->vif;
+               vif = container_of(priv, struct ieee80211_vif, drv_priv);
+
+               __mt76_sta_remove(&dev->mt76, vif, sta);
+               memset(msta, 0, sizeof(*msta));
+       }
+
+       dev->vif_mask = 0;
+       dev->beacon_mask = 0;
+}
+
 static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
 {
        u32 mask = dev->mt76.mmio.irqmask;
+       bool restart = dev->mt76.mcu_ops->mcu_restart;
        int i;
 
        ieee80211_stop_queues(dev->mt76.hw);
@@ -434,6 +492,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
 
        mutex_lock(&dev->mt76.mutex);
 
+       if (restart)
+               mt76x02_reset_state(dev);
+
        if (dev->beacon_mask)
                mt76_clear(dev, MT_BEACON_TIME_CFG,
                           MT_BEACON_TIME_CFG_BEACON_TX |
@@ -452,20 +513,21 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
        /* let fw reset DMA */
        mt76_set(dev, 0x734, 0x3);
 
+       if (restart)
+               dev->mt76.mcu_ops->mcu_restart(&dev->mt76);
+
        for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
                mt76_queue_tx_cleanup(dev, i, true);
 
        for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
                mt76_queue_rx_reset(dev, i);
 
-       mt76_wr(dev, MT_MAC_SYS_CTRL,
-               MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
-       mt76_set(dev, MT_WPDMA_GLO_CFG,
-                MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
+       mt76x02_mac_start(dev);
+
        if (dev->ed_monitor)
                mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
 
-       if (dev->beacon_mask)
+       if (dev->beacon_mask && !restart)
                mt76_set(dev, MT_BEACON_TIME_CFG,
                         MT_BEACON_TIME_CFG_BEACON_TX |
                         MT_BEACON_TIME_CFG_TBTT_EN);
@@ -486,9 +548,13 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
                napi_schedule(&dev->mt76.napi[i]);
        }
 
-       ieee80211_wake_queues(dev->mt76.hw);
-
-       mt76_txq_schedule_all(&dev->mt76);
+       if (restart) {
+               mt76x02_mcu_function_select(dev, Q_SELECT, 1);
+               ieee80211_restart_hw(dev->mt76.hw);
+       } else {
+               ieee80211_wake_queues(dev->mt76.hw);
+               mt76_txq_schedule_all(&dev->mt76);
+       }
 }
 
 static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
index a020c757ba5c6c59fba0463d01f5774a916e4339..a54b63a96eaefa24268f28573f1123c30ee6b9d5 100644 (file)
@@ -194,6 +194,8 @@ bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev)
                ret = true;
        }
 
+       dev->cal.agc_lowest_gain = dev->cal.agc_gain_adjust >= limit;
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain);
index 43f07461c8d39b6045388c8bbe59b1d2b0fcd6e8..6fb52b596d421753ff24f247225136b30c15150b 100644 (file)
@@ -85,8 +85,9 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
 
        mt76x02_insert_hdr_pad(skb);
 
-       txwi = skb_push(skb, sizeof(struct mt76x02_txwi));
+       txwi = (struct mt76x02_txwi *)(skb->data - sizeof(struct mt76x02_txwi));
        mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
+       skb_push(skb, sizeof(struct mt76x02_txwi));
 
        pid = mt76_tx_status_skb_add(mdev, wcid, skb);
        txwi->pktid = pid;
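
The reorder above suggests the descriptor has to be generated while skb->len
still excludes it - so the txwi is written into the headroom first and the
data pointer is only pushed over it afterwards. A flat-buffer model of that
headroom trick (sizes illustrative):

#include <assert.h>
#include <string.h>

#define TXWI_SIZE 8

struct buf {
	char mem[64];
	unsigned int data;	/* offset of payload start */
	unsigned int len;	/* payload length */
};

int main(void)
{
	struct buf b = { .data = 16, .len = 20 };

	/* Fill the descriptor in the headroom; b.len still counts only
	 * the frame, which is what length-derived fields must see.
	 */
	char *txwi = b.mem + b.data - TXWI_SIZE;
	memset(txwi, 0xaa, TXWI_SIZE);
	assert(b.len == 20);

	/* Now "push": extend the data area backwards over the txwi. */
	b.data -= TXWI_SIZE;
	b.len += TXWI_SIZE;
	assert(b.len == 28);
	return 0;
}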
index a48c261b0c634bca601f8d9fcd496b99a5bc2fa7..cd072ac614f76847b86618a4f40c87af171f5147 100644 (file)
@@ -237,6 +237,8 @@ int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
        struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
        int idx = 0;
 
+       memset(msta, 0, sizeof(*msta));
+
        idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid));
        if (idx < 0)
                return -ENOSPC;
@@ -274,6 +276,8 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
        struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
        struct mt76_txq *mtxq;
 
+       memset(mvif, 0, sizeof(*mvif));
+
        mvif->idx = idx;
        mvif->group_wcid.idx = MT_VIF_WCID(idx);
        mvif->group_wcid.hw_key_idx = -1;
@@ -289,6 +293,12 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        struct mt76x02_dev *dev = hw->priv;
        unsigned int idx = 0;
 
+       /* Allow changing the address in HW when creating the first interface. */
+       if (!dev->vif_mask &&
+           (((vif->addr[0] ^ dev->mt76.macaddr[0]) & ~GENMASK(4, 1)) ||
+            memcmp(vif->addr + 1, dev->mt76.macaddr + 1, ETH_ALEN - 1)))
+               mt76x02_mac_setaddr(dev, vif->addr);
+
        if (vif->addr[0] & BIT(1))
                idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
 
@@ -311,10 +321,6 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        if (dev->vif_mask & BIT(idx))
                return -EBUSY;
 
-       /* Allow to change address in HW if we create first interface. */
-       if (!dev->vif_mask && !ether_addr_equal(dev->mt76.macaddr, vif->addr))
-                mt76x02_mac_setaddr(dev, vif->addr);
-
        dev->vif_mask |= BIT(idx);
 
        mt76x02_vif_init(dev, vif, idx);
index f8534362e2c8cdf7edf9b03d5e2118d885d5ca6f..a30ef2c5a9db0433cbb696f6ab6f3e8b1811cb5d 100644 (file)
@@ -106,7 +106,7 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev)
                { MT_TX_SW_CFG1,                0x00010000 },
                { MT_TX_SW_CFG2,                0x00000000 },
                { MT_TXOP_CTRL_CFG,             0x0400583f },
-               { MT_TX_RTS_CFG,                0x00100020 },
+               { MT_TX_RTS_CFG,                0x00ffff20 },
                { MT_TX_TIMEOUT_CFG,            0x000a2290 },
                { MT_TX_RETRY_CFG,              0x47f01f0f },
                { MT_EXP_ACK_TIME,              0x002c00dc },
index 6c619f1c65c9cbfbbdf9241dc340c448826ab228..d7abe3d73badbbce7100781ece4b5c91407bc0dd 100644 (file)
@@ -71,6 +71,7 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
 
 void mt76x2_cleanup(struct mt76x02_dev *dev);
 
+int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard);
 void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable);
 void mt76x2_init_txpower(struct mt76x02_dev *dev,
                         struct ieee80211_supported_band *sband);
index 984d9c4c2e1a8ac9bfb435ef8de97898cb720b4d..d3927a13e92e91068344e431176a3b42ef6ff444 100644 (file)
@@ -77,7 +77,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
        }
 }
 
-static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
+int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
 {
        const u8 *macaddr = dev->mt76.macaddr;
        u32 val;
index 03e24ae7f66c7c8953e7308a1bac304ff294fb1b..605dc66ae83be45d956c0483e9688a8482f442c8 100644 (file)
@@ -165,9 +165,30 @@ error:
        return -ENOENT;
 }
 
+static int
+mt76pci_mcu_restart(struct mt76_dev *mdev)
+{
+       struct mt76x02_dev *dev;
+       int ret;
+
+       dev = container_of(mdev, struct mt76x02_dev, mt76);
+
+       mt76x02_mcu_cleanup(dev);
+       mt76x2_mac_reset(dev, true);
+
+       ret = mt76pci_load_firmware(dev);
+       if (ret)
+               return ret;
+
+       mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+       return 0;
+}
+
 int mt76x2_mcu_init(struct mt76x02_dev *dev)
 {
        static const struct mt76_mcu_ops mt76x2_mcu_ops = {
+               .mcu_restart = mt76pci_mcu_restart,
                .mcu_send_msg = mt76x02_mcu_msg_send,
        };
        int ret;
index 1848e8ab2e21cfb6332fd17259986d6b932dbff6..769a9b9720442c5d303e9a65519a8fe1de166089 100644 (file)
@@ -260,10 +260,15 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
        gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
        gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
 
-       if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
+       val = 0x1836 << 16;
+       if (!mt76x2_has_ext_lna(dev) &&
+           dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
                val = 0x1e42 << 16;
-       else
-               val = 0x1836 << 16;
+
+       if (mt76x2_has_ext_lna(dev) &&
+           dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ &&
+           dev->mt76.chandef.width < NL80211_CHAN_WIDTH_40)
+               val = 0x0f36 << 16;
 
        val |= 0xf8;
 
@@ -280,6 +285,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
 {
        u8 *gain = dev->cal.agc_gain_init;
        u8 low_gain_delta, gain_delta;
+       u32 agc_35, agc_37;
        bool gain_change;
        int low_gain;
        u32 val;
@@ -318,6 +324,16 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
        else
                low_gain_delta = 14;
 
+       agc_37 = 0x2121262c;
+       if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
+               agc_35 = 0x11111516;
+       else if (low_gain == 2)
+               agc_35 = agc_37 = 0x08080808;
+       else if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+               agc_35 = 0x10101014;
+       else
+               agc_35 = 0x11111116;
+
        if (low_gain == 2) {
                mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
                mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
@@ -326,15 +342,13 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
                dev->cal.agc_gain_adjust = 0;
        } else {
                mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
-               if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
-                       mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
-               else
-                       mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
-               mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
                gain_delta = 0;
                dev->cal.agc_gain_adjust = low_gain_delta;
        }
 
+       mt76_wr(dev, MT_BBP(AGC, 35), agc_35);
+       mt76_wr(dev, MT_BBP(AGC, 37), agc_37);
+
        dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
        dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
        mt76x2_phy_set_gain_val(dev);
index ddb6b2c48e01283a8041620fdbc46542de8d04c7..ac0f13d4629963cea77e5ab113caf3e11a33275a 100644 (file)
 #include "mt76x2u.h"
 
 static const struct usb_device_id mt76x2u_device_table[] = {
-       { USB_DEVICE(0x0e8d, 0x7612) }, /* Alfa AWUS036ACM */
        { USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */
        { USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */
        { USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */
-       { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USB-AC1200 */
+       { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USBAC1200 - Alfa AWUS036ACM */
        { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */
        { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
        { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
@@ -66,6 +65,10 @@ static int mt76x2u_probe(struct usb_interface *intf,
 
        mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
        dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
+       if (!is_mt76x2(dev)) {
+               err = -ENODEV;
+               goto err;
+       }
 
        err = mt76x2u_register_device(dev);
        if (err < 0)
index 5e84b4535cb1456c22bbf883d79c9d44c62da438..3b82345756ea90d3cc93f71946c2ec6cb7e81110 100644 (file)
@@ -93,7 +93,6 @@ int mt76x2u_mac_reset(struct mt76x02_dev *dev)
        mt76_wr(dev, MT_TX_LINK_CFG, 0x1020);
        mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13);
        mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00);
-       mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20);
 
        mt76_wr(dev, MT_WMM_AIFSN, 0x2273);
        mt76_wr(dev, MT_WMM_CWMIN, 0x2344);
index 5a349fe3e576f606ec2fb1997ed8a394d54d7b3f..2585df5123350ba8adf52cf14ef6133de7e03f77 100644 (file)
@@ -289,8 +289,11 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
        dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
        dev->queue_ops->kick(dev, q);
 
-       if (q->queued > q->ndesc - 8)
+       if (q->queued > q->ndesc - 8 && !q->stopped) {
                ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+               q->stopped = true;
+       }
+
        spin_unlock_bh(&q->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_tx);
@@ -374,7 +377,10 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
        if (last_skb) {
                mt76_queue_ps_skb(dev, sta, last_skb, true);
                dev->queue_ops->kick(dev, hwq);
+       } else {
+               ieee80211_sta_eosp(sta);
        }
+
        spin_unlock_bh(&hwq->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
@@ -577,6 +583,9 @@ void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
        struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
        struct mt76_queue *hwq = mtxq->hwq;
 
+       if (!test_bit(MT76_STATE_RUNNING, &dev->state))
+               return;
+
        spin_lock_bh(&hwq->lock);
        if (list_empty(&mtxq->list))
                list_add_tail(&mtxq->list, &hwq->swq);
index ae6ada370597a6f0ebfd3b7d69bb583ecf8759ef..4c1abd4924054c6f377ca062ad98ca5bb8446dc7 100644 (file)
@@ -655,7 +655,11 @@ static void mt76u_tx_tasklet(unsigned long data)
                        spin_lock_bh(&q->lock);
                }
                mt76_txq_schedule(dev, q);
-               wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+
+               wake = q->stopped && q->queued < q->ndesc - 8;
+               if (wake)
+                       q->stopped = false;
+
                if (!q->queued)
                        wake_up(&dev->tx_wait);
 
index d8b7863f79261a3275b6641ffdf7607e23fdd206..6ae7f14dc9bf936ec3ae34f9f4d4f1ee67de6646 100644 (file)
@@ -303,6 +303,10 @@ static int mt7601u_probe(struct usb_interface *usb_intf,
        mac_rev = mt7601u_rr(dev, MT_MAC_CSR0);
        dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n",
                 asic_rev, mac_rev);
+       if ((asic_rev >> 16) != 0x7601) {
+               ret = -ENODEV;
+               goto err;
+       }
 
        /* Note: vendor driver skips this check for MT7601U */
        if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
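
The new bail-out reads the chip identifier from the top half of the ASIC
revision word - the same idea the mt76x0/mt76x2 probes express through their
is_mt76x0()/is_mt76x2() helpers. Numerically (register value illustrative,
not read from real hardware):

#include <assert.h>
#include <stdint.h>

/* ASIC revision register: chip id in bits [31:16], revision below. */
static uint16_t chip_id(uint32_t asic_rev)
{
	return (uint16_t)(asic_rev >> 16);
}

int main(void)
{
	assert(chip_id(0x76010200) == 0x7601);
	return 0;
}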
index 4b1744e9fb78a08c59fe0ac71d0d9962ae6761be..50b92ca92bd75c33d783ed9bfdf0f01f7d5ce0ae 100644 (file)
@@ -673,7 +673,6 @@ enum rt2x00_state_flags {
        CONFIG_CHANNEL_HT40,
        CONFIG_POWERSAVING,
        CONFIG_HT_DISABLED,
-       CONFIG_QOS_DISABLED,
        CONFIG_MONITORING,
 
        /*
index 2825560e2424dbc766c5d5489491ff7dc67c5211..e8462f25d2522c4dbe95215b3de0279213cdc2b4 100644 (file)
@@ -642,18 +642,8 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
                        rt2x00dev->intf_associated--;
 
                rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
-
-               clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
        }
 
-       /*
-        * Check for access point which do not support 802.11e . We have to
-        * generate data frames sequence number in S/W for such AP, because
-        * of H/W bug.
-        */
-       if (changes & BSS_CHANGED_QOS && !bss_conf->qos)
-               set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
-
        /*
         * When the erp information has changed, we should perform
         * additional configuration steps. For all other changes we are done.
index 92ddc19e7bf747a23d0eb24c15b05ff111751754..4834b4eb0206408093a54d47b2a6a5831aa75674 100644 (file)
@@ -201,15 +201,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
        if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
                /*
                 * rt2800 has a H/W (or F/W) bug, device incorrectly increase
-                * seqno on retransmited data (non-QOS) frames. To workaround
-                * the problem let's generate seqno in software if QOS is
-                * disabled.
+                * seqno on retransmitted data (non-QOS) and management frames.
+                * To work around the problem, generate the seqno in
+                * software, except for beacons: those are transmitted
+                * periodically by H/W, so H/W has to assign their seqno.
                 */
-               if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
-                       __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
-               else
+               if (ieee80211_is_beacon(hdr->frame_control)) {
+                       __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
                        /* H/W will generate sequence number */
                        return;
+               }
+
+               __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
        }
 
        /*
index 2b26f762fbc3b3f5f837e27267d0de9fc1b9e5c8..01acb6e533655d6b6041cbbde43af8c1364aec60 100644 (file)
@@ -1074,6 +1074,12 @@ static const struct spi_device_id st95hf_id[] = {
 };
 MODULE_DEVICE_TABLE(spi, st95hf_id);
 
+static const struct of_device_id st95hf_spi_of_match[] = {
+        { .compatible = "st,st95hf" },
+        { },
+};
+MODULE_DEVICE_TABLE(of, st95hf_spi_of_match);
+
 static int st95hf_probe(struct spi_device *nfc_spi_dev)
 {
        int ret;
@@ -1260,6 +1266,7 @@ static struct spi_driver st95hf_driver = {
        .driver = {
                .name = "st95hf",
                .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(st95hf_spi_of_match),
        },
        .id_table = st95hf_id,
        .probe = st95hf_probe,
index b72a303176c70962e04f8304a816c78f812512c1..9486acc08402db3a17079c0ec2589ce445bb23d2 100644 (file)
@@ -198,14 +198,15 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
                return NULL;
 
        nd_btt->id = ida_simple_get(&nd_region->btt_ida, 0, 0, GFP_KERNEL);
-       if (nd_btt->id < 0) {
-               kfree(nd_btt);
-               return NULL;
-       }
+       if (nd_btt->id < 0)
+               goto out_nd_btt;
 
        nd_btt->lbasize = lbasize;
-       if (uuid)
+       if (uuid) {
                uuid = kmemdup(uuid, 16, GFP_KERNEL);
+               if (!uuid)
+                       goto out_put_id;
+       }
        nd_btt->uuid = uuid;
        dev = &nd_btt->dev;
        dev_set_name(dev, "btt%d.%d", nd_region->id, nd_btt->id);
@@ -220,6 +221,13 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
                return NULL;
        }
        return dev;
+
+out_put_id:
+       ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
+
+out_nd_btt:
+       kfree(nd_btt);
+       return NULL;
 }
 
 struct device *nd_btt_create(struct nd_region *nd_region)
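
The rewrite above converts an open-coded kfree into the kernel's standard
goto-unwind shape: each failure jumps to a label that releases exactly what
has been acquired so far, in reverse order. A generic, compilable model of
the pattern:

#include <stdlib.h>

struct thing { int *a; int *b; };

static struct thing *thing_create(void)
{
	struct thing *t = malloc(sizeof(*t));

	if (!t)
		return NULL;
	t->a = malloc(sizeof(*t->a));
	if (!t->a)
		goto out_free_t;
	t->b = malloc(sizeof(*t->b));
	if (!t->b)
		goto out_free_a;
	return t;

out_free_a:	/* unwind in reverse order of acquisition */
	free(t->a);
out_free_t:
	free(t);
	return NULL;
}

int main(void)
{
	struct thing *t = thing_create();

	if (t) {
		free(t->b);
		free(t->a);
		free(t);
	}
	return 0;
}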
index 7849bf1812c47e64f76e16c0ccf8f0ccc6f3bc25..f293556cbbf6d747004b132a23c440296ec760f7 100644 (file)
@@ -2249,9 +2249,12 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
        if (!nsblk->uuid)
                goto blk_err;
        memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
-       if (name[0])
+       if (name[0]) {
                nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
                                GFP_KERNEL);
+               if (!nsblk->alt_name)
+                       goto blk_err;
+       }
        res = nsblk_add_resource(nd_region, ndd, nsblk,
                        __le64_to_cpu(nd_label->dpa));
        if (!res)
index bc2f700feef8abdad873197237f34f765055c22f..0279eb1da3ef5ae40c5ab80ef6940732dca03bf0 100644 (file)
@@ -113,13 +113,13 @@ static void write_pmem(void *pmem_addr, struct page *page,
 
        while (len) {
                mem = kmap_atomic(page);
-               chunk = min_t(unsigned int, len, PAGE_SIZE);
+               chunk = min_t(unsigned int, len, PAGE_SIZE - off);
                memcpy_flushcache(pmem_addr, mem + off, chunk);
                kunmap_atomic(mem);
                len -= chunk;
                off = 0;
                page++;
-               pmem_addr += PAGE_SIZE;
+               pmem_addr += chunk;
        }
 }
 
@@ -132,7 +132,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
 
        while (len) {
                mem = kmap_atomic(page);
-               chunk = min_t(unsigned int, len, PAGE_SIZE);
+               chunk = min_t(unsigned int, len, PAGE_SIZE - off);
                rem = memcpy_mcsafe(mem + off, pmem_addr, chunk);
                kunmap_atomic(mem);
                if (rem)
@@ -140,7 +140,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
                len -= chunk;
                off = 0;
                page++;
-               pmem_addr += PAGE_SIZE;
+               pmem_addr += chunk;
        }
        return BLK_STS_OK;
 }
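
Both loops above shared the same pair of bugs: the first chunk ignored the
intra-page offset, so a copy could run past the end of the mapped page, and
the flat pmem address advanced by a full page even when less had been copied,
leaving gaps. A userspace model of the corrected loop:

#include <assert.h>
#include <string.h>

#define PAGE_SIZE 4096u

/* Copy len bytes starting at offset off within pages[0] into dst,
 * never crossing a page boundary within a single memcpy.
 */
static void copy_paged(char *dst, char *const *pages,
		       unsigned int off, unsigned int len)
{
	while (len) {
		unsigned int chunk = len < PAGE_SIZE - off ?
				     len : PAGE_SIZE - off;

		memcpy(dst, pages[0] + off, chunk);
		len -= chunk;
		off = 0;
		pages++;
		dst += chunk;	/* was: dst += PAGE_SIZE - the bug */
	}
}

int main(void)
{
	static char p0[PAGE_SIZE], p1[PAGE_SIZE], dst[16];
	char *pages[] = { p0, p1 };

	memset(p0 + PAGE_SIZE - 4, 'A', 4);	/* tail of page 0 */
	memset(p1, 'B', 8);			/* head of page 1 */
	copy_paged(dst, pages, PAGE_SIZE - 4, 12);
	assert(dst[3] == 'A' && dst[4] == 'B' && dst[11] == 'B');
	return 0;
}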
index f8bb746a549f7b993dcf61f052acde8303d11cae..a570f2263a424e96908c559750454a086a3df3e2 100644 (file)
@@ -22,6 +22,8 @@ static bool key_revalidate = true;
 module_param(key_revalidate, bool, 0444);
 MODULE_PARM_DESC(key_revalidate, "Require key validation at init.");
 
+static const char zero_key[NVDIMM_PASSPHRASE_LEN];
+
 static void *key_data(struct key *key)
 {
        struct encrypted_key_payload *epayload = dereference_key_locked(key);
@@ -75,6 +77,16 @@ static struct key *nvdimm_request_key(struct nvdimm *nvdimm)
        return key;
 }
 
+static const void *nvdimm_get_key_payload(struct nvdimm *nvdimm,
+               struct key **key)
+{
+       *key = nvdimm_request_key(nvdimm);
+       if (!*key)
+               return zero_key;
+
+       return key_data(*key);
+}
+
 static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
                key_serial_t id, int subclass)
 {
@@ -105,36 +117,57 @@ static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
        return key;
 }
 
-static struct key *nvdimm_key_revalidate(struct nvdimm *nvdimm)
+static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm,
+               key_serial_t id, int subclass, struct key **key)
+{
+       *key = NULL;
+       if (id == 0) {
+               if (subclass == NVDIMM_BASE_KEY)
+                       return zero_key;
+               else
+                       return NULL;
+       }
+
+       *key = nvdimm_lookup_user_key(nvdimm, id, subclass);
+       if (!*key)
+               return NULL;
+
+       return key_data(*key);
+}
+
+static int nvdimm_key_revalidate(struct nvdimm *nvdimm)
 {
        struct key *key;
        int rc;
+       const void *data;
 
        if (!nvdimm->sec.ops->change_key)
-               return NULL;
+               return -EOPNOTSUPP;
 
-       key = nvdimm_request_key(nvdimm);
-       if (!key)
-               return NULL;
+       data = nvdimm_get_key_payload(nvdimm, &key);
 
        /*
         * Send the same key to the hardware as new and old key to
         * verify that the key is good.
         */
-       rc = nvdimm->sec.ops->change_key(nvdimm, key_data(key),
-                       key_data(key), NVDIMM_USER);
+       rc = nvdimm->sec.ops->change_key(nvdimm, data, data, NVDIMM_USER);
        if (rc < 0) {
                nvdimm_put_key(key);
-               key = NULL;
+               return rc;
        }
-       return key;
+
+       nvdimm_put_key(key);
+       nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
+       return 0;
 }
 
 static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
 {
        struct device *dev = &nvdimm->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
-       struct key *key = NULL;
+       struct key *key;
+       const void *data;
        int rc;
 
        /* The bus lock should be held at the top level of the call stack */
@@ -160,16 +193,11 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
                if (!key_revalidate)
                        return 0;
 
-               key = nvdimm_key_revalidate(nvdimm);
-               if (!key)
-                       return nvdimm_security_freeze(nvdimm);
+               return nvdimm_key_revalidate(nvdimm);
        } else
-               key = nvdimm_request_key(nvdimm);
+               data = nvdimm_get_key_payload(nvdimm, &key);
 
-       if (!key)
-               return -ENOKEY;
-
-       rc = nvdimm->sec.ops->unlock(nvdimm, key_data(key));
+       rc = nvdimm->sec.ops->unlock(nvdimm, data);
        dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
                        rc == 0 ? "success" : "fail");
 
@@ -195,6 +223,7 @@ int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid)
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct key *key;
        int rc;
+       const void *data;
 
        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -214,11 +243,12 @@ int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid)
                return -EBUSY;
        }
 
-       key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
-       if (!key)
+       data = nvdimm_get_user_key_payload(nvdimm, keyid,
+                       NVDIMM_BASE_KEY, &key);
+       if (!data)
                return -ENOKEY;
 
-       rc = nvdimm->sec.ops->disable(nvdimm, key_data(key));
+       rc = nvdimm->sec.ops->disable(nvdimm, data);
        dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
                        rc == 0 ? "success" : "fail");
 
@@ -235,6 +265,7 @@ int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid,
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct key *key, *newkey;
        int rc;
+       const void *data, *newdata;
 
        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -249,22 +280,19 @@ int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid,
                return -EIO;
        }
 
-       if (keyid == 0)
-               key = NULL;
-       else {
-               key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
-               if (!key)
-                       return -ENOKEY;
-       }
+       data = nvdimm_get_user_key_payload(nvdimm, keyid,
+                       NVDIMM_BASE_KEY, &key);
+       if (!data)
+               return -ENOKEY;
 
-       newkey = nvdimm_lookup_user_key(nvdimm, new_keyid, NVDIMM_NEW_KEY);
-       if (!newkey) {
+       newdata = nvdimm_get_user_key_payload(nvdimm, new_keyid,
+                       NVDIMM_NEW_KEY, &newkey);
+       if (!newdata) {
                nvdimm_put_key(key);
                return -ENOKEY;
        }
 
-       rc = nvdimm->sec.ops->change_key(nvdimm, key ? key_data(key) : NULL,
-                       key_data(newkey), pass_type);
+       rc = nvdimm->sec.ops->change_key(nvdimm, data, newdata, pass_type);
        dev_dbg(dev, "key: %d %d update%s: %s\n",
                        key_serial(key), key_serial(newkey),
                        pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
@@ -286,8 +314,9 @@ int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid,
 {
        struct device *dev = &nvdimm->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
-       struct key *key;
+       struct key *key = NULL;
        int rc;
+       const void *data;
 
        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -319,11 +348,12 @@ int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid,
                return -EOPNOTSUPP;
        }
 
-       key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
-       if (!key)
+       data = nvdimm_get_user_key_payload(nvdimm, keyid,
+                       NVDIMM_BASE_KEY, &key);
+       if (!data)
                return -ENOKEY;
 
-       rc = nvdimm->sec.ops->erase(nvdimm, key_data(key), pass_type);
+       rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
        dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
                        pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
                        rc == 0 ? "success" : "fail");
@@ -337,8 +367,9 @@ int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
 {
        struct device *dev = &nvdimm->dev;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
-       struct key *key;
+       struct key *key = NULL;
        int rc;
+       const void *data;
 
        /* The bus lock should be held at the top level of the call stack */
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
@@ -368,15 +399,12 @@ int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
                return -EBUSY;
        }
 
-       if (keyid == 0)
-               key = NULL;
-       else {
-               key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
-               if (!key)
-                       return -ENOKEY;
-       }
+       data = nvdimm_get_user_key_payload(nvdimm, keyid,
+                       NVDIMM_BASE_KEY, &key);
+       if (!data)
+               return -ENOKEY;
 
-       rc = nvdimm->sec.ops->overwrite(nvdimm, key ? key_data(key) : NULL);
+       rc = nvdimm->sec.ops->overwrite(nvdimm, data);
        dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
                        rc == 0 ? "success" : "fail");
 
index 470601980794edd9ebd803199587c62f0586fb03..6265d9225ec8f783c02bb30c5d0048787f18b67f 100644 (file)
@@ -288,7 +288,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
                                "Cancelling I/O %d", req->tag);
 
        nvme_req(req)->status = NVME_SC_ABORT_REQ;
-       blk_mq_complete_request(req);
+       blk_mq_complete_request_sync(req);
        return true;
 }
 EXPORT_SYMBOL_GPL(nvme_cancel_request);
@@ -388,7 +388,7 @@ static void nvme_free_ns_head(struct kref *ref)
        nvme_mpath_remove_disk(head);
        ida_simple_remove(&head->subsys->ns_ida, head->instance);
        list_del_init(&head->entry);
-       cleanup_srcu_struct_quiesced(&head->srcu);
+       cleanup_srcu_struct(&head->srcu);
        nvme_put_subsystem(head->subsys);
        kfree(head);
 }
index f3b9d91ba0dfd30ba7c4c3f554e14ea860c389b7..6d8451356eaca468742ecf335ee20763d6f73876 100644 (file)
@@ -1845,7 +1845,7 @@ nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
        memset(queue, 0, sizeof(*queue));
        queue->ctrl = ctrl;
        queue->qnum = idx;
-       atomic_set(&queue->csn, 1);
+       atomic_set(&queue->csn, 0);
        queue->dev = ctrl->dev;
 
        if (idx > 0)
@@ -1887,7 +1887,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
         */
 
        queue->connection_id = 0;
-       atomic_set(&queue->csn, 1);
+       atomic_set(&queue->csn, 0);
 }
 
 static void
@@ -2183,7 +2183,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 {
        struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
        struct nvme_command *sqe = &cmdiu->sqe;
-       u32 csn;
        int ret, opstate;
 
        /*
@@ -2198,8 +2197,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 
        /* format the FC-NVME CMD IU and fcp_req */
        cmdiu->connection_id = cpu_to_be64(queue->connection_id);
-       csn = atomic_inc_return(&queue->csn);
-       cmdiu->csn = cpu_to_be32(csn);
        cmdiu->data_len = cpu_to_be32(data_len);
        switch (io_dir) {
        case NVMEFC_FCP_WRITE:
@@ -2257,11 +2254,24 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
        if (!(op->flags & FCOP_FLAGS_AEN))
                blk_mq_start_request(op->rq);
 
+       cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
        ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
                                        &ctrl->rport->remoteport,
                                        queue->lldd_handle, &op->fcp_req);
 
        if (ret) {
+               /*
+                * If the lld fails to send the command, is there an issue with
+                * the csn value?  If the command that fails is the Connect,
+                * no - as the connection won't be live.  If it is a command
+                * post-connect, it's possible a gap in csn may be created.
+                * Does this matter?  As Linux initiators don't send fused
+                * commands, no.  The gap would exist, but as there's nothing
+                * that depends on csn order to be delivered on the target
+                * side, it shouldn't hurt.  It would be difficult for a
+                * target to even detect the csn gap as it has no idea when the
+                * cmd with the csn was supposed to arrive.
+                */
                opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
                __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
 
index 2839bb70badfbcb8284bc5bbbc1f457bd3b58c63..f0716f6ce41fa2a1ad993e45adba9148d7f0c120 100644 (file)
@@ -404,15 +404,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
 static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
                struct nvme_ns *ns)
 {
-       enum nvme_ana_state old;
-
        mutex_lock(&ns->head->lock);
-       old = ns->ana_state;
        ns->ana_grpid = le32_to_cpu(desc->grpid);
        ns->ana_state = desc->state;
        clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
 
-       if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old))
+       if (nvme_state_is_live(ns->ana_state))
                nvme_mpath_set_live(ns);
        mutex_unlock(&ns->head->lock);
 }
index e7e08889865e732d503a6ac2af5d38cac4dd9672..68c49dd672104d82ea768a6e9bf4354df731422b 100644 (file)
@@ -627,7 +627,7 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
        return ret;
 }
 
-static inline void nvme_tcp_end_request(struct request *rq, __le16 status)
+static inline void nvme_tcp_end_request(struct request *rq, u16 status)
 {
        union nvme_result res = {};
 
index 76250181fee0555b2e576651ee67349ecb776392..9f72d515fc4b30a3785b396910660074ad076cf4 100644 (file)
@@ -24,6 +24,11 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
        return len;
 }
 
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
+{
+       return le64_to_cpu(cmd->get_log_page.lpo);
+}
+
 static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
 {
        nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
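The new offset accessor mirrors the existing length accessor, whose tail is visible in the context above. For orientation, a hedged sketch of that length decode, consistent with the return len; line (NUMD is the spec's zero-based dword count split across NUMDL/NUMDU):

    /* Not part of this diff's changes; shown for orientation only. */
    u32 nvmet_get_log_page_len(struct nvme_command *cmd)
    {
            u32 len = le16_to_cpu(cmd->get_log_page.numdu);

            len <<= 16;
            len += le16_to_cpu(cmd->get_log_page.numdl);
            len += 1;               /* NUMD is a 0's based value */
            len *= sizeof(u32);     /* dwords to bytes */

            return len;
    }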
index 2d73b66e368627cdee268a74d30fb3c5d6a34235..b3e765a95af8ee7447c536ff48095504c8100d67 100644 (file)
@@ -509,7 +509,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 
        ret = nvmet_p2pmem_ns_enable(ns);
        if (ret)
-               goto out_unlock;
+               goto out_dev_disable;
 
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                nvmet_p2pmem_ns_add_p2p(ctrl, ns);
@@ -550,7 +550,7 @@ out_unlock:
 out_dev_put:
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
-
+out_dev_disable:
        nvmet_ns_dev_disable(ns);
        goto out_unlock;
 }
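Both hunks enforce the usual kernel unwind-ladder rule: every step that succeeded before the failure must be undone in reverse order by falling through labels, so a failure after the namespace device has been enabled must route through a label that disables it again. A generic sketch of the shape (all names hypothetical):

    /* Hypothetical names; the shape these hunks restore. */
    static int enable_thing(struct thing *t)
    {
            int ret;

            mutex_lock(&t->lock);

            ret = dev_enable(t);
            if (ret)
                    goto out_unlock;

            ret = p2p_enable(t);
            if (ret)
                    goto out_dev_disable;   /* must undo dev_enable() */

            mutex_unlock(&t->lock);
            return 0;

    out_dev_disable:
            dev_disable(t);
    out_unlock:
            mutex_unlock(&t->lock);
            return ret;
    }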
index c872b47a88f31722b358e219c403f2c2cb765988..33ed95e72d6b19598f76df0c50f6fccfdaec37bd 100644 (file)
@@ -131,54 +131,76 @@ static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port
                memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
 }
 
+static size_t discovery_log_entries(struct nvmet_req *req)
+{
+       struct nvmet_ctrl *ctrl = req->sq->ctrl;
+       struct nvmet_subsys_link *p;
+       struct nvmet_port *r;
+       size_t entries = 0;
+
+       list_for_each_entry(p, &req->port->subsystems, entry) {
+               if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
+                       continue;
+               entries++;
+       }
+       list_for_each_entry(r, &req->port->referrals, entry)
+               entries++;
+       return entries;
+}
+
 static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
 {
        const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmf_disc_rsp_page_hdr *hdr;
+       u64 offset = nvmet_get_log_page_offset(req->cmd);
        size_t data_len = nvmet_get_log_page_len(req->cmd);
-       size_t alloc_len = max(data_len, sizeof(*hdr));
-       int residual_len = data_len - sizeof(*hdr);
+       size_t alloc_len;
        struct nvmet_subsys_link *p;
        struct nvmet_port *r;
        u32 numrec = 0;
        u16 status = 0;
+       void *buffer;
+
+       /* Spec requires dword aligned offsets */
+       if (offset & 0x3) {
+               status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+               goto out;
+       }
 
        /*
         * Make sure we're passing at least a buffer of response header size.
         * If the host-provided data length is less than the header size,
         * only the number of bytes requested by the host will be sent.
         */
-       hdr = kzalloc(alloc_len, GFP_KERNEL);
-       if (!hdr) {
+       down_read(&nvmet_config_sem);
+       alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
+       buffer = kzalloc(alloc_len, GFP_KERNEL);
+       if (!buffer) {
+               up_read(&nvmet_config_sem);
                status = NVME_SC_INTERNAL;
                goto out;
        }
 
-       down_read(&nvmet_config_sem);
+       hdr = buffer;
        list_for_each_entry(p, &req->port->subsystems, entry) {
+               char traddr[NVMF_TRADDR_SIZE];
+
                if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
                        continue;
-               if (residual_len >= entry_size) {
-                       char traddr[NVMF_TRADDR_SIZE];
-
-                       nvmet_set_disc_traddr(req, req->port, traddr);
-                       nvmet_format_discovery_entry(hdr, req->port,
-                                       p->subsys->subsysnqn, traddr,
-                                       NVME_NQN_NVME, numrec);
-                       residual_len -= entry_size;
-               }
+
+               nvmet_set_disc_traddr(req, req->port, traddr);
+               nvmet_format_discovery_entry(hdr, req->port,
+                               p->subsys->subsysnqn, traddr,
+                               NVME_NQN_NVME, numrec);
                numrec++;
        }
 
        list_for_each_entry(r, &req->port->referrals, entry) {
-               if (residual_len >= entry_size) {
-                       nvmet_format_discovery_entry(hdr, r,
-                                       NVME_DISC_SUBSYS_NAME,
-                                       r->disc_addr.traddr,
-                                       NVME_NQN_DISC, numrec);
-                       residual_len -= entry_size;
-               }
+               nvmet_format_discovery_entry(hdr, r,
+                               NVME_DISC_SUBSYS_NAME,
+                               r->disc_addr.traddr,
+                               NVME_NQN_DISC, numrec);
                numrec++;
        }
 
@@ -190,8 +212,8 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
 
        up_read(&nvmet_config_sem);
 
-       status = nvmet_copy_to_sgl(req, 0, hdr, data_len);
-       kfree(hdr);
+       status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
+       kfree(buffer);
 out:
        nvmet_req_complete(req, status);
 }
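With offsets honored, a host no longer has to size the whole log up front: it can read the fixed 1024-byte header, learn numrec, then fetch the entries at their offset. A rough host-side sketch, with get_log_page() and host_ctrl as hypothetical stand-ins rather than code from this diff:

    /* Hypothetical two-step read.  sizeof(hdr) is 1024, so the second
     * read's offset trivially passes the (offset & 0x3) check added above.
     */
    static int read_disc_log(struct host_ctrl *ctrl)
    {
            struct nvmf_disc_rsp_page_hdr hdr;
            u64 numrec;
            int ret;

            ret = get_log_page(ctrl, NVME_LOG_DISC, 0, &hdr, sizeof(hdr));
            if (ret)
                    return ret;

            numrec = le64_to_cpu(hdr.numrec);
            return get_log_page(ctrl, NVME_LOG_DISC, sizeof(hdr),
                            ctrl->entries,
                            numrec * sizeof(struct nvmf_disc_rsp_page_entry));
    }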
index 3e43212d3c1c6bba5a6d553dc2a965188c5ccbf5..bc6ebb51b0bf7c5310940fca19450fd115ea7788 100644 (file)
@@ -75,11 +75,11 @@ err:
        return ret;
 }
 
-static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
+static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
 {
-       bv->bv_page = sg_page_iter_page(iter);
-       bv->bv_offset = iter->sg->offset;
-       bv->bv_len = PAGE_SIZE - iter->sg->offset;
+       bv->bv_page = sg_page(sg);
+       bv->bv_offset = sg->offset;
+       bv->bv_len = sg->length;
 }
 
 static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
@@ -128,14 +128,14 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
 
 static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
 {
-       ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
-       struct sg_page_iter sg_pg_iter;
+       ssize_t nr_bvec = req->sg_cnt;
        unsigned long bv_cnt = 0;
        bool is_sync = false;
        size_t len = 0, total_len = 0;
        ssize_t ret = 0;
        loff_t pos;
-
+       int i;
+       struct scatterlist *sg;
 
        if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
                is_sync = true;
@@ -147,8 +147,8 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
        }
 
        memset(&req->f.iocb, 0, sizeof(struct kiocb));
-       for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
-               nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
+       for_each_sg(req->sg, sg, req->sg_cnt, i) {
+               nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
                len += req->f.bvec[bv_cnt].bv_len;
                total_len += req->f.bvec[bv_cnt].bv_len;
                bv_cnt++;
@@ -225,7 +225,7 @@ static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
 
 static void nvmet_file_execute_rw(struct nvmet_req *req)
 {
-       ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
+       ssize_t nr_bvec = req->sg_cnt;
 
        if (!req->sg_cnt || !nr_bvec) {
                nvmet_req_complete(req, 0);
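The sizing change and the bvec fill above have to move together: the loop now emits exactly one bvec per scatterlist entry, so the array is sized by req->sg_cnt rather than by a page count derived from data_len. For example:

    /* Illustration only, assuming PAGE_SIZE == 4096 and a 16 KiB
     * transfer mapped as two 8 KiB scatterlist entries.
     */
    ssize_t old_nr = DIV_ROUND_UP(16384, 4096);     /* 4 page-sized bvecs   */
    ssize_t new_nr = 2;                             /* one bvec per SG entry */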
index 51e49efd7849df640b5e7cb9fa9715ada7d373e4..1653d19b187fd5de826875cdcf5675c8fcb4431c 100644 (file)
@@ -428,6 +428,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
 
 u32 nvmet_get_log_page_len(struct nvme_command *cmd);
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd);
 
 extern struct list_head *nvmet_ports;
 void nvmet_port_disc_changed(struct nvmet_port *port,
index 810ab0fbcccbf844d6af9fe6eb40e155355d9731..d820f3edd4311821696e6045d843024166b83dfc 100644 (file)
@@ -7,7 +7,6 @@
  */
 #include <linux/etherdevice.h>
 #include <linux/kernel.h>
-#include <linux/nvmem-consumer.h>
 #include <linux/of_net.h>
 #include <linux/phy.h>
 #include <linux/export.h>
index 1be571c20062c53341e141791b7137bae129ec6e..6bad04cbb1d37b8e9a6227e8d05eca6ce8545642 100644 (file)
 #define DBG_IRT(x...)
 #endif
 
+#ifdef CONFIG_64BIT
+#define COMPARE_IRTE_ADDR(irte, hpa)   ((irte)->dest_iosapic_addr == (hpa))
+#else
 #define COMPARE_IRTE_ADDR(irte, hpa)   \
-               ((irte)->dest_iosapic_addr == F_EXTEND(hpa))
+               ((irte)->dest_iosapic_addr == ((hpa) | 0xffffffff00000000ULL))
+#endif
 
 #define IOSAPIC_REG_SELECT              0x00
 #define IOSAPIC_REG_WINDOW              0x10
index 56dd83a45e55dc21360f729c488a213a2a258241..5484a46dafda857a7e64207ccac4a8249cf1512c 100644 (file)
@@ -213,12 +213,10 @@ void parport_daisy_fini(struct parport *port)
 struct pardevice *parport_open(int devnum, const char *name)
 {
        struct daisydev *p = topology;
-       struct pardev_cb par_cb;
        struct parport *port;
        struct pardevice *dev;
        int daisy;
 
-       memset(&par_cb, 0, sizeof(par_cb));
        spin_lock(&topology_lock);
        while (p && p->devnum != devnum)
                p = p->next;
@@ -232,7 +230,7 @@ struct pardevice *parport_open(int devnum, const char *name)
        port = parport_get_port(p->port);
        spin_unlock(&topology_lock);
 
-       dev = parport_register_dev_model(port, name, &par_cb, devnum);
+       dev = parport_register_device(port, name, NULL, NULL, NULL, 0, NULL);
        parport_put_port(port);
        if (!dev)
                return NULL;
@@ -482,31 +480,3 @@ static int assign_addrs(struct parport *port)
        kfree(deviceid);
        return detected;
 }
-
-static int daisy_drv_probe(struct pardevice *par_dev)
-{
-       struct device_driver *drv = par_dev->dev.driver;
-
-       if (strcmp(drv->name, "daisy_drv"))
-               return -ENODEV;
-       if (strcmp(par_dev->name, daisy_dev_name))
-               return -ENODEV;
-
-       return 0;
-}
-
-static struct parport_driver daisy_driver = {
-       .name = "daisy_drv",
-       .probe = daisy_drv_probe,
-       .devmodel = true,
-};
-
-int daisy_drv_init(void)
-{
-       return parport_register_driver(&daisy_driver);
-}
-
-void daisy_drv_exit(void)
-{
-       parport_unregister_driver(&daisy_driver);
-}
index e5e6a463a9412e167a9e2b2c34f4a6cfb3a1cb2a..e035174ba205d12dbc6e529c6ec85c8bda9e5d21 100644 (file)
@@ -257,7 +257,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
 ssize_t parport_device_id (int devnum, char *buffer, size_t count)
 {
        ssize_t retval = -ENXIO;
-       struct pardevice *dev = parport_open(devnum, daisy_dev_name);
+       struct pardevice *dev = parport_open (devnum, "Device ID probe");
        if (!dev)
                return -ENXIO;
 
index 0171b8dbcdcd5f57c54eeee19eed23d65dbd8897..5dc53d420ca8ca805c0c036c23e3c1a3fc42ac00 100644 (file)
@@ -137,19 +137,11 @@ static struct bus_type parport_bus_type = {
 
 int parport_bus_init(void)
 {
-       int retval;
-
-       retval = bus_register(&parport_bus_type);
-       if (retval)
-               return retval;
-       daisy_drv_init();
-
-       return 0;
+       return bus_register(&parport_bus_type);
 }
 
 void parport_bus_exit(void)
 {
-       daisy_drv_exit();
        bus_unregister(&parport_bus_type);
 }
 
index 3f3df4c29f6e1d40112343e91f902735a6b4d535..905282a8ddaaeda2f8df06570bfb3716e2b2479c 100644 (file)
@@ -115,6 +115,10 @@ static void remove_board(struct controller *ctrl, bool safe_removal)
                 * removed from the slot/adapter.
                 */
                msleep(1000);
+
+               /* Ignore link or presence changes caused by power off */
+               atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
+                          &ctrl->pending_events);
        }
 
        /* turn off Green LED */
index 7c1b362f599aebc4bbf66acb09fe67fb5d76ea39..766f5779db929fca76738a91df9793fc6af3fedb 100644 (file)
@@ -6262,8 +6262,7 @@ static int __init pci_setup(char *str)
                        } else if (!strncmp(str, "pcie_scan_all", 13)) {
                                pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
                        } else if (!strncmp(str, "disable_acs_redir=", 18)) {
-                               disable_acs_redir_param =
-                                       kstrdup(str + 18, GFP_KERNEL);
+                               disable_acs_redir_param = str + 18;
                        } else {
                                printk(KERN_ERR "PCI: Unknown option `%s'\n",
                                                str);
@@ -6274,3 +6273,19 @@ static int __init pci_setup(char *str)
        return 0;
 }
 early_param("pci", pci_setup);
+
+/*
+ * 'disable_acs_redir_param' is initialized in pci_setup(), above, to point
+ * to data in the __initdata section which will be freed after the init
+ * sequence is complete. We can't allocate memory in pci_setup() because some
+ * architectures do not have any memory allocation service available during
+ * an early_param() call. So we allocate memory and copy the variable here
+ * before the init section is freed.
+ */
+static int __init pci_realloc_setup_params(void)
+{
+       disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
+
+       return 0;
+}
+pure_initcall(pci_realloc_setup_params);
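Two details make this pattern safe: kstrdup() returns NULL when handed NULL, so nothing special is needed if the option was never given, and pure_initcall() runs before free_initmem() releases the __initdata the parameter still points into. The shape of the pattern, generically (my_opt is a hypothetical option name):

    static const char *my_opt;      /* points into __initdata at first */

    static int __init my_opt_setup(char *str)
    {
            my_opt = str;           /* no allocator usable this early */
            return 0;
    }
    early_param("my_opt", my_opt_setup);

    static int __init my_opt_realloc(void)
    {
            my_opt = kstrdup(my_opt, GFP_KERNEL);   /* NULL-safe */
            return 0;
    }
    pure_initcall(my_opt_realloc);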
index 224d886341158ba55494da1c766a933b0cfaeefc..d994839a3e24b5ec8c1452f3489c47d8fa7aba20 100644 (file)
@@ -273,6 +273,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
 u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
                           enum pcie_link_width *width);
 void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
+void pcie_report_downtraining(struct pci_dev *dev);
 
 /* Single Root I/O Virtualization */
 struct pci_sriov {
index 5cbdbca904ac8e50760abbc4eecf834def9ab875..362eb8cfa53ba040c5ab955a03dd9773bb751ad3 100644 (file)
@@ -142,3 +142,11 @@ config PCIE_PTM
 
          This is only useful if you have devices that support PTM, but it
          is safe to enable even if you don't.
+
+config PCIE_BW
+       bool "PCI Express Bandwidth Change Notification"
+       depends on PCIEPORTBUS
+       help
+         This enables PCI Express Bandwidth Change Notification.  If
+         you know link width or rate changes occur only to correct
+         unreliable links, you may answer Y.
index f1d7bc1e5efae2561fecba886b15a17a17ee5c4b..efb9d2e71e9eecad1bc0298692d166114de5be6a 100644 (file)
@@ -3,7 +3,6 @@
 # Makefile for PCI Express features and port driver
 
 pcieportdrv-y                  := portdrv_core.o portdrv_pci.o err.o
-pcieportdrv-y                  += bw_notification.o
 
 obj-$(CONFIG_PCIEPORTBUS)      += pcieportdrv.o
 
@@ -13,3 +12,4 @@ obj-$(CONFIG_PCIEAER_INJECT)  += aer_inject.o
 obj-$(CONFIG_PCIE_PME)         += pme.o
 obj-$(CONFIG_PCIE_DPC)         += dpc.o
 obj-$(CONFIG_PCIE_PTM)         += ptm.o
+obj-$(CONFIG_PCIE_BW)          += bw_notification.o
index d2eae3b7cc0f74d5c8fdec80fa6ffffd68dd8501..4fa9e3523ee1a22bc763aa5ea0f162dc00ab09dd 100644 (file)
@@ -30,6 +30,8 @@ static void pcie_enable_link_bandwidth_notification(struct pci_dev *dev)
 {
        u16 lnk_ctl;
 
+       pcie_capability_write_word(dev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
+
        pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl);
        lnk_ctl |= PCI_EXP_LNKCTL_LBMIE;
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
@@ -44,11 +46,10 @@ static void pcie_disable_link_bandwidth_notification(struct pci_dev *dev)
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
 }
 
-static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
+static irqreturn_t pcie_bw_notification_irq(int irq, void *context)
 {
        struct pcie_device *srv = context;
        struct pci_dev *port = srv->port;
-       struct pci_dev *dev;
        u16 link_status, events;
        int ret;
 
@@ -58,17 +59,26 @@ static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
        if (ret != PCIBIOS_SUCCESSFUL || !events)
                return IRQ_NONE;
 
+       pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
+       pcie_update_link_speed(port->subordinate, link_status);
+       return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
+{
+       struct pcie_device *srv = context;
+       struct pci_dev *port = srv->port;
+       struct pci_dev *dev;
+
        /*
         * Print status from downstream devices, not this root port or
         * downstream switch port.
         */
        down_read(&pci_bus_sem);
        list_for_each_entry(dev, &port->subordinate->devices, bus_list)
-               __pcie_print_link_status(dev, false);
+               pcie_report_downtraining(dev);
        up_read(&pci_bus_sem);
 
-       pcie_update_link_speed(port->subordinate, link_status);
-       pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
        return IRQ_HANDLED;
 }
 
@@ -80,7 +90,8 @@ static int pcie_bandwidth_notification_probe(struct pcie_device *srv)
        if (!pcie_link_bandwidth_notification_supported(srv->port))
                return -ENODEV;
 
-       ret = request_threaded_irq(srv->irq, NULL, pcie_bw_notification_handler,
+       ret = request_threaded_irq(srv->irq, pcie_bw_notification_irq,
+                                  pcie_bw_notification_handler,
                                   IRQF_SHARED, "PCIe BW notif", srv);
        if (ret)
                return ret;
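The rework follows the standard request_threaded_irq() split: the new primary handler does only non-sleeping register work and returns IRQ_WAKE_THREAD, and the original handler now runs in thread context where taking pci_bus_sem is legal. The contract, condensed (names hypothetical):

    static irqreturn_t my_hardirq(int irq, void *ctx)
    {
            struct my_dev *d = ctx;

            if (!my_event_pending(d))
                    return IRQ_NONE;        /* not ours (IRQF_SHARED) */

            my_ack_event(d);                /* quick, non-sleeping    */
            return IRQ_WAKE_THREAD;         /* hand off the slow part */
    }

    static irqreturn_t my_thread_fn(int irq, void *ctx)
    {
            /* kernel-thread context: may sleep, take semaphores, print */
            return IRQ_HANDLED;
    }

    /* in probe: */
    ret = request_threaded_irq(irq, my_hardirq, my_thread_fn,
                               IRQF_SHARED, "my-dev", d);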
index 1d50dc58ac400ae1a325f788ee33352aebf58c09..944827a8c7d363f0066f8002e891a988b11ca565 100644 (file)
@@ -49,7 +49,11 @@ int pcie_dpc_init(void);
 static inline int pcie_dpc_init(void) { return 0; }
 #endif
 
+#ifdef CONFIG_PCIE_BW
 int pcie_bandwidth_notification_init(void);
+#else
+static inline int pcie_bandwidth_notification_init(void) { return 0; }
+#endif
 
 /* Port Type */
 #define PCIE_ANY_PORT                  (~0)
index 7d04f9d087a62a94cf4edd5fdab4749d752f2c4e..1b330129089fea765919e7ae477298473edb843c 100644 (file)
@@ -55,7 +55,8 @@ static int pcie_message_numbers(struct pci_dev *dev, int mask,
         * 7.8.2, 7.10.10, 7.31.2.
         */
 
-       if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
+       if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP |
+                   PCIE_PORT_SERVICE_BWNOTIF)) {
                pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
                *pme = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
                nvec = *pme + 1;
index 2ec0df04e0dca15ce1f56b3f9049280f199e0928..7e12d016386394ab9b401f3e5dcb8da8b917484c 100644 (file)
@@ -2388,7 +2388,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
        return dev;
 }
 
-static void pcie_report_downtraining(struct pci_dev *dev)
+void pcie_report_downtraining(struct pci_dev *dev)
 {
        if (!pci_is_pcie(dev))
                return;
index a59ad09ce911d564c074930ea22968fcfab928e7..a077f67fe1dac17508d09e954cf4e5acead355d6 100644 (file)
@@ -3877,6 +3877,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
                         quirk_dma_func1_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
+                        quirk_dma_func1_alias);
 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
                         quirk_dma_func1_alias);
index 5163097b43dff1472af1b905936588750d45b9a8..4bbd9ede38c8355a9bf226e80eaabc19bc9eda6e 100644 (file)
@@ -485,8 +485,11 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy,
        struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
        int new_mode;
 
-       if (phy->index != 0)
+       if (phy->index != 0) {
+               if (mode == PHY_MODE_USB_HOST)
+                       return 0;
                return -EINVAL;
+       }
 
        switch (mode) {
        case PHY_MODE_USB_HOST:
index 900c7073c46f4e4ca530463fcfa91e79b38b10b4..71308766e89199f443047c715f5ce3206375df70 100644 (file)
@@ -440,7 +440,7 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
 
        ret = cros_ec_create_pdinfo(debug_info);
        if (ret)
-               goto remove_debugfs;
+               goto remove_log;
 
        ec->debug_info = debug_info;
 
@@ -448,6 +448,8 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
 
        return 0;
 
+remove_log:
+       cros_ec_cleanup_console_log(debug_info);
 remove_debugfs:
        debugfs_remove_recursive(debug_info->dir);
        return ret;
@@ -467,7 +469,8 @@ static int __maybe_unused cros_ec_debugfs_suspend(struct device *dev)
 {
        struct cros_ec_dev *ec = dev_get_drvdata(dev);
 
-       cancel_delayed_work_sync(&ec->debug_info->log_poll_work);
+       if (ec->debug_info->log_buffer.buf)
+               cancel_delayed_work_sync(&ec->debug_info->log_poll_work);
 
        return 0;
 }
@@ -476,7 +479,8 @@ static int __maybe_unused cros_ec_debugfs_resume(struct device *dev)
 {
        struct cros_ec_dev *ec = dev_get_drvdata(dev);
 
-       schedule_delayed_work(&ec->debug_info->log_poll_work, 0);
+       if (ec->debug_info->log_buffer.buf)
+               schedule_delayed_work(&ec->debug_info->log_poll_work, 0);
 
        return 0;
 }
index f6ff29a11f1ace1bf1234d4617cc84671162eb2c..14355668ddfa3146e88074132737a757a075090a 100644 (file)
@@ -223,11 +223,11 @@ int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg)
                msg->command, msg->type, msg->flags, msg->response_size,
                msg->request_size);
 
+       mutex_lock(&ec->mailbox_lock);
        /* Prepare request packet */
        rq = ec->data_buffer;
        wilco_ec_prepare(msg, rq);
 
-       mutex_lock(&ec->mailbox_lock);
        ret = wilco_ec_transfer(ec, msg, rq);
        mutex_unlock(&ec->mailbox_lock);
 
index 8f018b3f3cd4c42ef40764d39587e7e2e11285d1..c7039f52ad51802afa773525af8eed45e5438ac1 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <linux/debugfs.h>
 #include <linux/device.h>
+#include <linux/dmi.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/platform_data/x86/clk-pmc-atom.h>
@@ -391,11 +392,27 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc)
 }
 #endif /* CONFIG_DEBUG_FS */
 
+/*
+ * Some systems need one or more of their pmc_plt_clks to be
+ * marked as critical.
+ */
+static const struct dmi_system_id critclk_systems[] = {
+       {
+               .ident = "MPL CEC1x",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"),
+               },
+       },
+       { /*sentinel*/ }
+};
+
 static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
                          const struct pmc_data *pmc_data)
 {
        struct platform_device *clkdev;
        struct pmc_clk_data *clk_data;
+       const struct dmi_system_id *d = dmi_first_match(critclk_systems);
 
        clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
        if (!clk_data)
@@ -403,6 +420,10 @@ static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
 
        clk_data->base = pmc_regmap; /* offset is added by client */
        clk_data->clks = pmc_data->clks;
+       if (d) {
+               clk_data->critical = true;
+               pr_info("%s critclks quirk enabled\n", d->ident);
+       }
 
        clkdev = platform_device_register_data(&pdev->dev, "clk-pmc-atom",
                                               PLATFORM_DEVID_NONE,
index 08d5037fd0521523dcfb5d9ce211a117b2ff52e9..6887870ba32c38b075145500b173052f45226206 100644 (file)
@@ -221,6 +221,9 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
        int avg_current;
        u32 cc_lsb;
 
+       if (!divider)
+               return 0;
+
        sample &= 0xffffff;             /* 24-bits, unsigned */
        offset &= 0x7ff;                /* 10-bits, signed */
 
index ad969d9fc9815a173385588e034a1c650ba6c868..c2644a9fe80f1f1432e6e62ce6cbb7d8fbf0986b 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Power supply driver for the goldfish emulator
  *
index dce24f596160973f4b6e27827741d17e86f7e73c..5358a80d854f99e0157a38bea979eb1b0912f46b 100644 (file)
@@ -383,15 +383,11 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
        char *prop_buf;
        char *attrname;
 
-       dev_dbg(dev, "uevent\n");
-
        if (!psy || !psy->desc) {
                dev_dbg(dev, "No power supply yet\n");
                return ret;
        }
 
-       dev_dbg(dev, "POWER_SUPPLY_NAME=%s\n", psy->desc->name);
-
        ret = add_uevent_var(env, "POWER_SUPPLY_NAME=%s", psy->desc->name);
        if (ret)
                return ret;
@@ -427,8 +423,6 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
                        goto out;
                }
 
-               dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf);
-
                ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf);
                kfree(attrname);
                if (ret)
index 91751617b37af33b6241be4d13647775d5444881..c53a2185a0393c689c631d7df7f1b760cb2b72e0 100644 (file)
@@ -130,6 +130,7 @@ static int meson_audio_arb_probe(struct platform_device *pdev)
        arb->rstc.nr_resets = ARRAY_SIZE(axg_audio_arb_reset_bits);
        arb->rstc.ops = &meson_audio_arb_rstc_ops;
        arb->rstc.of_node = dev->of_node;
+       arb->rstc.owner = THIS_MODULE;
 
        /*
         * Enable general :
index a71734c416939354129253af00090975ff0f1b5e..f933c06bff4f804a3e77408d51fe15606b62e135 100644 (file)
@@ -667,9 +667,9 @@ config RTC_DRV_S5M
          will be called rtc-s5m.
 
 config RTC_DRV_SD3078
-    tristate "ZXW Crystal SD3078"
+    tristate "ZXW Shenzhen whwave SD3078"
     help
-      If you say yes here you get support for the ZXW Crystal
+      If you say yes here you get support for the ZXW Shenzhen whwave
       SD3078 RTC chips.
 
       This driver can also be built as a module. If so, the module
index e5444296075ee147e74c93d35d3eb98e6a2b0c48..4d6bf9304ceb35932dfadbc921b2e658e5e3d2ec 100644 (file)
@@ -298,7 +298,7 @@ static int cros_ec_rtc_suspend(struct device *dev)
        struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
 
        if (device_may_wakeup(dev))
-               enable_irq_wake(cros_ec_rtc->cros_ec->irq);
+               return enable_irq_wake(cros_ec_rtc->cros_ec->irq);
 
        return 0;
 }
@@ -309,7 +309,7 @@ static int cros_ec_rtc_resume(struct device *dev)
        struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
 
        if (device_may_wakeup(dev))
-               disable_irq_wake(cros_ec_rtc->cros_ec->irq);
+               return disable_irq_wake(cros_ec_rtc->cros_ec->irq);
 
        return 0;
 }
index b4e054c64bad9e54d23adb3a47da1008224906b3..69b54e5556c06234c5339431f3149bc923ebcf49 100644 (file)
@@ -480,6 +480,13 @@ static int da9063_rtc_probe(struct platform_device *pdev)
        da9063_data_to_tm(data, &rtc->alarm_time, rtc);
        rtc->rtc_sync = false;
 
+       /*
+        * TODO: some models have alarms on a minute boundary but still support
+        * real hardware interrupts. Add this once the core supports it.
+        */
+       if (config->rtc_data_start != RTC_SEC)
+               rtc->rtc_dev->uie_unsupported = 1;
+
        irq_alarm = platform_get_irq_byname(pdev, "ALARM");
        ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
                                        da9063_alarm_event,
index d417b203cbc553eb25ab0cbf0eb493bcd84c9d46..1d3de2a3d1a4d7a0ad5a7d327efaaca0d72f468c 100644 (file)
@@ -374,7 +374,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
 static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
 {
        unsigned int byte;
-       int value = 0xff;       /* return 0xff for ignored values */
+       int value = -1;                 /* return -1 for ignored values */
 
        byte = readb(rtc->regbase + reg_off);
        if (byte & AR_ENB) {
index 6e294b4d3635fe399586f05045297646d9c8c574..f89f9d02e7884f321f858f18a020e122d83c8a03 100644 (file)
@@ -2004,14 +2004,14 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
        blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
 
 raw:
-       block->blocks = (private->real_cyl *
+       block->blocks = ((unsigned long) private->real_cyl *
                          private->rdc_data.trk_per_cyl *
                          blk_per_trk);
 
        dev_info(&device->cdev->dev,
-                "DASD with %d KB/block, %d KB total size, %d KB/track, "
+                "DASD with %u KB/block, %lu KB total size, %u KB/track, "
                 "%s\n", (block->bp_block >> 10),
-                ((private->real_cyl *
+                (((unsigned long) private->real_cyl *
                   private->rdc_data.trk_per_cyl *
                   blk_per_trk * (block->bp_block >> 9)) >> 1),
                 ((blk_per_trk * block->bp_block) >> 10),
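The casts matter because C multiplies int operands in int before the assignment widens anything, so large volumes overflow 32 bits. Widening the first operand keeps the whole chain in 64-bit arithmetic (unsigned long is 64-bit on s390). With made-up magnitudes:

    unsigned int cyl = 1200000, trk_per_cyl = 15, blk_per_trk = 360;

    /* evaluates in 32-bit unsigned arithmetic and wraps (~6.48e9): */
    unsigned long wrong = cyl * trk_per_cyl * blk_per_trk;

    /* widening the first operand keeps every step in 64 bits: */
    unsigned long right = (unsigned long) cyl * trk_per_cyl * blk_per_trk;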
index fd2146bcc0add9aae3b71ba4cc88b788b7702591..e17364e13d2f71ec289a47f6a79f7c56ae85b264 100644 (file)
@@ -629,7 +629,7 @@ con3270_init(void)
                     (void (*)(unsigned long)) con3270_read_tasklet,
                     (unsigned long) condev->read);
 
-       raw3270_add_view(&condev->view, &con3270_fn, 1);
+       raw3270_add_view(&condev->view, &con3270_fn, 1, RAW3270_VIEW_LOCK_IRQ);
 
        INIT_LIST_HEAD(&condev->freemem);
        for (i = 0; i < CON3270_STRING_PAGES; i++) {
index 8f3a2eeb28dca0b579d2d773057296e92f379342..8b48ba9c598ecedcac5ca78c86f97d3587e71c7d 100644 (file)
@@ -463,7 +463,8 @@ fs3270_open(struct inode *inode, struct file *filp)
 
        init_waitqueue_head(&fp->wait);
        fp->fs_pid = get_pid(task_pid(current));
-       rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
+       rc = raw3270_add_view(&fp->view, &fs3270_fn, minor,
+                             RAW3270_VIEW_LOCK_BH);
        if (rc) {
                fs3270_free_view(&fp->view);
                goto out;
index f8cd2935fbfd48c5aef1ad980457cc55433b6db4..63a41b16876102a8f1210396f1970d0d5e77df18 100644 (file)
@@ -920,7 +920,7 @@ raw3270_deactivate_view(struct raw3270_view *view)
  * Add view to device with minor "minor".
  */
 int
-raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
+raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor, int subclass)
 {
        unsigned long flags;
        struct raw3270 *rp;
@@ -942,6 +942,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
                view->cols = rp->cols;
                view->ascebc = rp->ascebc;
                spin_lock_init(&view->lock);
+               lockdep_set_subclass(&view->lock, subclass);
                list_add(&view->list, &rp->view_list);
                rc = 0;
                spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
index 114ca7cbf8897dce734e59cb283923e2c160b3bf..3afaa35f73513cba47566e9601b775339e6cdf78 100644 (file)
@@ -150,6 +150,8 @@ struct raw3270_fn {
 struct raw3270_view {
        struct list_head list;
        spinlock_t lock;
+#define RAW3270_VIEW_LOCK_IRQ  0
+#define RAW3270_VIEW_LOCK_BH   1
        atomic_t ref_count;
        struct raw3270 *dev;
        struct raw3270_fn *fn;
@@ -158,7 +160,7 @@ struct raw3270_view {
        unsigned char *ascebc;          /* ascii -> ebcdic table */
 };
 
-int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int);
+int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int, int);
 int raw3270_activate_view(struct raw3270_view *);
 void raw3270_del_view(struct raw3270_view *);
 void raw3270_deactivate_view(struct raw3270_view *);
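The new subclass argument is for lockdep only: every view lock shares one lock class, but the console view takes its lock from hard-irq context while the fullscreen and tty views take theirs with bottom halves disabled, and without distinct subclasses lockdep conflates the two usages into false positives. Condensed from the raw3270.c hunk above:

    static void init_view_lock(struct raw3270_view *view, int subclass)
    {
            spin_lock_init(&view->lock);
            /* same lock class, distinct subclass per locking context,
             * so lockdep keeps irq-context and bh-context users apart
             */
            lockdep_set_subclass(&view->lock, subclass);
    }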
index 2b0c36c2c5688ebf6ef0266d66cad52793b7ae1b..98d7fc152e32f85e8e53e1e56b26244753c67a00 100644 (file)
@@ -980,7 +980,8 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
                return PTR_ERR(tp);
 
        rc = raw3270_add_view(&tp->view, &tty3270_fn,
-                             tty->index + RAW3270_FIRSTMINOR);
+                             tty->index + RAW3270_FIRSTMINOR,
+                             RAW3270_VIEW_LOCK_BH);
        if (rc) {
                tty3270_free_view(tp);
                return rc;
index 4159c63a5fd2bbba9b9c2949fde8c56ba9030a89..a835b31aad999dcbc90847455b0c75f612aba563 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/crw.h>
 #include <asm/isc.h>
 #include <asm/ebcdic.h>
+#include <asm/ap.h>
 
 #include "css.h"
 #include "cio.h"
@@ -586,6 +587,15 @@ static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
                              " failed (rc=%d).\n", ret);
 }
 
+static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area)
+{
+       CIO_CRW_EVENT(3, "chsc: ap config changed\n");
+       if (sei_area->rs != 5)
+               return;
+
+       ap_bus_cfg_chg();
+}
+
 static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
 {
        switch (sei_area->cc) {
@@ -612,6 +622,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
        case 2: /* i/o resource accessibility */
                chsc_process_sei_res_acc(sei_area);
                break;
+       case 3: /* ap config changed */
+               chsc_process_sei_ap_cfg_chg(sei_area);
+               break;
        case 7: /* channel-path-availability information */
                chsc_process_sei_chp_avail(sei_area);
                break;
index a10cec0e86eb495ffd45f3854a09e1a76bf3e598..0b3b9de45c602042384751921379b0d903e5be79 100644 (file)
@@ -72,20 +72,24 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
 {
        struct vfio_ccw_private *private;
        struct irb *irb;
+       bool is_final;
 
        private = container_of(work, struct vfio_ccw_private, io_work);
        irb = &private->irb;
 
+       is_final = !(scsw_actl(&irb->scsw) &
+                    (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
        if (scsw_is_solicited(&irb->scsw)) {
                cp_update_scsw(&private->cp, &irb->scsw);
-               cp_free(&private->cp);
+               if (is_final)
+                       cp_free(&private->cp);
        }
        memcpy(private->io_region->irb_area, irb, sizeof(*irb));
 
        if (private->io_trigger)
                eventfd_signal(private->io_trigger, 1);
 
-       if (private->mdev)
+       if (private->mdev && is_final)
                private->state = VFIO_CCW_STATE_IDLE;
 }
 
index e15816ff126582f933c66add86bb45e7b0606e0f..1546389d71dbca7ebc1f2f103780182742226376 100644 (file)
@@ -810,11 +810,18 @@ static int ap_device_remove(struct device *dev)
        struct ap_device *ap_dev = to_ap_dev(dev);
        struct ap_driver *ap_drv = ap_dev->drv;
 
+       /* prepare ap queue device removal */
        if (is_queue_dev(dev))
-               ap_queue_remove(to_ap_queue(dev));
+               ap_queue_prepare_remove(to_ap_queue(dev));
+
+       /* driver's chance to clean up gracefully */
        if (ap_drv->remove)
                ap_drv->remove(ap_dev);
 
+       /* now do the ap queue device remove */
+       if (is_queue_dev(dev))
+               ap_queue_remove(to_ap_queue(dev));
+
        /* Remove queue/card from list of active queues/cards */
        spin_lock_bh(&ap_list_lock);
        if (is_card_dev(dev))
@@ -860,6 +867,16 @@ void ap_bus_force_rescan(void)
 }
 EXPORT_SYMBOL(ap_bus_force_rescan);
 
+/*
+ * A config change has happened, force an ap bus rescan.
+ */
+void ap_bus_cfg_chg(void)
+{
+       AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__);
+
+       ap_bus_force_rescan();
+}
+
 /*
  * hex2bitmap() - parse hex mask string and set bitmap.
  * Valid strings are "0x012345678" with at least one valid hex number.
index d0059eae5d94bd51a5c677c28162ed63c9f0d437..15a98a673c5cc3323980f15e95d3418b1c65e028 100644 (file)
@@ -91,6 +91,7 @@ enum ap_state {
        AP_STATE_WORKING,
        AP_STATE_QUEUE_FULL,
        AP_STATE_SUSPEND_WAIT,
+       AP_STATE_REMOVE,        /* about to be removed from driver */
        AP_STATE_UNBOUND,       /* momentary not bound to a driver */
        AP_STATE_BORKED,        /* broken */
        NR_AP_STATES
@@ -252,6 +253,7 @@ void ap_bus_force_rescan(void);
 
 void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
+void ap_queue_prepare_remove(struct ap_queue *aq);
 void ap_queue_remove(struct ap_queue *aq);
 void ap_queue_suspend(struct ap_device *ap_dev);
 void ap_queue_resume(struct ap_device *ap_dev);
index ba261210c6da0518fe7f8f4cb8f702b0503464b9..5ea83dc4f1d740e9db1288ed9d8f70423312d5ba 100644 (file)
@@ -420,6 +420,10 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
                [AP_EVENT_POLL] = ap_sm_suspend_read,
                [AP_EVENT_TIMEOUT] = ap_sm_nop,
        },
+       [AP_STATE_REMOVE] = {
+               [AP_EVENT_POLL] = ap_sm_nop,
+               [AP_EVENT_TIMEOUT] = ap_sm_nop,
+       },
        [AP_STATE_UNBOUND] = {
                [AP_EVENT_POLL] = ap_sm_nop,
                [AP_EVENT_TIMEOUT] = ap_sm_nop,
@@ -740,18 +744,31 @@ void ap_flush_queue(struct ap_queue *aq)
 }
 EXPORT_SYMBOL(ap_flush_queue);
 
-void ap_queue_remove(struct ap_queue *aq)
+void ap_queue_prepare_remove(struct ap_queue *aq)
 {
-       ap_flush_queue(aq);
+       spin_lock_bh(&aq->lock);
+       /* flush queue */
+       __ap_flush_queue(aq);
+       /* set REMOVE state to prevent new messages from being queued */
+       aq->state = AP_STATE_REMOVE;
+       spin_unlock_bh(&aq->lock);
        del_timer_sync(&aq->timeout);
+}
 
-       /* reset with zero, also clears irq registration */
+void ap_queue_remove(struct ap_queue *aq)
+{
+       /*
+        * All messages have been flushed and the state is
+        * AP_STATE_REMOVE. Now reset with zero, which also
+        * clears the irq registration, and move the state
+        * to AP_STATE_UNBOUND to signal that this queue is
+        * currently not used by any driver.
+        */
        spin_lock_bh(&aq->lock);
        ap_zapq(aq->qid);
        aq->state = AP_STATE_UNBOUND;
        spin_unlock_bh(&aq->lock);
 }
-EXPORT_SYMBOL(ap_queue_remove);
 
 void ap_queue_reinit_state(struct ap_queue *aq)
 {
@@ -760,4 +777,3 @@ void ap_queue_reinit_state(struct ap_queue *aq)
        ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
        spin_unlock_bh(&aq->lock);
 }
-EXPORT_SYMBOL(ap_queue_reinit_state);
index 3e85d665c572957aa491917b1433b5254812b0f6..45eb0c14b8807d17c228ef563506e82b6a50d533 100644 (file)
@@ -51,7 +51,8 @@ static debug_info_t *debug_info;
 
 static void __init pkey_debug_init(void)
 {
-       debug_info = debug_register("pkey", 1, 1, 4 * sizeof(long));
+       /* 5 arguments per dbf entry (including the format string ptr) */
+       debug_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
        debug_register_view(debug_info, &debug_sprintf_view);
        debug_set_level(debug_info, 3);
 }
index eb93c2d27d0ad142c4d977d74df3e415468336af..689c2af7026a3adcf08e2e6eb019d9352e6de9d4 100644 (file)
@@ -586,6 +586,7 @@ static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
 
 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
                                                     struct zcrypt_queue *zq,
+                                                    struct module **pmod,
                                                     unsigned int weight)
 {
        if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
@@ -595,15 +596,15 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
        atomic_add(weight, &zc->load);
        atomic_add(weight, &zq->load);
        zq->request_count++;
+       *pmod = zq->queue->ap_dev.drv->driver.owner;
        return zq;
 }
 
 static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
                                     struct zcrypt_queue *zq,
+                                    struct module *mod,
                                     unsigned int weight)
 {
-       struct module *mod = zq->queue->ap_dev.drv->driver.owner;
-
        zq->request_count--;
        atomic_sub(weight, &zc->load);
        atomic_sub(weight, &zq->load);
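Threading *pmod through is a lifetime fix: zcrypt_drop_queue() used to re-derive the owner from zq->queue->ap_dev.drv, which may already be unbound by completion time, while the module reference taken in zcrypt_pick_queue() is still held and must be put on the very same pointer. Stripped to its bones (hypothetical helper names):

    static struct module *queue_owner_get(struct zcrypt_queue *zq)
    {
            /* resolve the owner while the driver binding is valid */
            struct module *mod = zq->queue->ap_dev.drv->driver.owner;

            return try_module_get(mod) ? mod : NULL;
    }

    static void queue_owner_put(struct module *mod)
    {
            /* ap_dev.drv may be unbound by now; use the carried pointer */
            module_put(mod);
    }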
@@ -653,6 +654,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
        unsigned int weight, pref_weight;
        unsigned int func_code;
        int qid = 0, rc = -ENODEV;
+       struct module *mod;
 
        trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
 
@@ -706,7 +708,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
                        pref_weight = weight;
                }
        }
-       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
        if (!pref_zq) {
@@ -718,7 +720,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
        rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
 
        spin_lock(&zcrypt_list_lock);
-       zcrypt_drop_queue(pref_zc, pref_zq, weight);
+       zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
 out:
@@ -735,6 +737,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
        unsigned int weight, pref_weight;
        unsigned int func_code;
        int qid = 0, rc = -ENODEV;
+       struct module *mod;
 
        trace_s390_zcrypt_req(crt, TP_ICARSACRT);
 
@@ -788,7 +791,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
                        pref_weight = weight;
                }
        }
-       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
        if (!pref_zq) {
@@ -800,7 +803,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
        rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
 
        spin_lock(&zcrypt_list_lock);
-       zcrypt_drop_queue(pref_zc, pref_zq, weight);
+       zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
 out:
@@ -819,6 +822,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
        unsigned int func_code;
        unsigned short *domain;
        int qid = 0, rc = -ENODEV;
+       struct module *mod;
 
        trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
 
@@ -865,7 +869,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
                        pref_weight = weight;
                }
        }
-       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
        if (!pref_zq) {
@@ -881,7 +885,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
        rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
 
        spin_lock(&zcrypt_list_lock);
-       zcrypt_drop_queue(pref_zc, pref_zq, weight);
+       zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
 out:
@@ -932,6 +936,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
        unsigned int func_code;
        struct ap_message ap_msg;
        int qid = 0, rc = -ENODEV;
+       struct module *mod;
 
        trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
 
@@ -1000,7 +1005,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
                        pref_weight = weight;
                }
        }
-       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
        if (!pref_zq) {
@@ -1012,7 +1017,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
        rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
 
        spin_lock(&zcrypt_list_lock);
-       zcrypt_drop_queue(pref_zc, pref_zq, weight);
+       zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
 out_free:
@@ -1033,6 +1038,7 @@ static long zcrypt_rng(char *buffer)
        struct ap_message ap_msg;
        unsigned int domain;
        int qid = 0, rc = -ENODEV;
+       struct module *mod;
 
        trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
 
@@ -1064,7 +1070,7 @@ static long zcrypt_rng(char *buffer)
                        pref_weight = weight;
                }
        }
-       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
        if (!pref_zq) {
@@ -1076,7 +1082,7 @@ static long zcrypt_rng(char *buffer)
        rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
 
        spin_lock(&zcrypt_list_lock);
-       zcrypt_drop_queue(pref_zc, pref_zq, weight);
+       zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
 out:
index 7617d21cb2960618cbc097bbf85cb8515234aa14..f63c5c871d3ddf48f4a88fe3c2b2db684394c7b3 100644 (file)
@@ -1595,6 +1595,7 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
                if (priv->channel[direction] == NULL) {
                        if (direction == CTCM_WRITE)
                                channel_free(priv->channel[CTCM_READ]);
+                       result = -ENODEV;
                        goto out_dev;
                }
                priv->channel[direction]->netdev = dev;
index 197b0f5b63e7183473b91a0d8d8f728bb3e0c16e..44bd6f04c145da55b1aef66ad147983d74cedf9a 100644 (file)
@@ -1150,13 +1150,16 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
 
 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
 {
+       struct sk_buff *skb;
+
        /* release may never happen from within CQ tasklet scope */
        WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
 
        if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
                qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR);
 
-       __skb_queue_purge(&buf->skb_list);
+       while ((skb = __skb_dequeue(&buf->skb_list)) != NULL)
+               consume_skb(skb);
 }
 
 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
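Functionally __skb_queue_purge() and the open-coded loop free the same skbs; the difference is bookkeeping. __skb_queue_purge() frees via kfree_skb(), which fires the skb:kfree_skb drop tracepoint, so every normal TX completion here looked like a packet drop to tracing tools; consume_skb() records a successful hand-off instead. In short:

    /* Same release, different bookkeeping; call exactly one of them: */
    if (dropped)
            kfree_skb(skb);         /* fires the skb:kfree_skb tracepoint */
    else
            consume_skb(skb);       /* normal completion, no drop event   */

The qeth_l2/l3 hunks further down make the complementary change: those paths bump tx_dropped and really are drops, so the drop-flavored kfree_skb() is the appropriate free there.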
index 8efb2e8ff8f460adacd53376d5cddd3fc6953b03..c3067fd3bd9ee47ad79d106cd3b17067ea91fbf3 100644 (file)
@@ -629,8 +629,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
        } /* else fall through */
 
        QETH_TXQ_STAT_INC(queue, tx_dropped);
-       QETH_TXQ_STAT_INC(queue, tx_errors);
-       dev_kfree_skb_any(skb);
+       kfree_skb(skb);
        netif_wake_queue(dev);
        return NETDEV_TX_OK;
 }
@@ -645,6 +644,8 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
        int rc;
 
+       qeth_l2_vnicc_set_defaults(card);
+
        if (gdev->dev.type == &qeth_generic_devtype) {
                rc = qeth_l2_create_device_attributes(&gdev->dev);
                if (rc)
@@ -652,8 +653,6 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
        }
 
        hash_init(card->mac_htable);
-       card->info.hwtrap = 0;
-       qeth_l2_vnicc_set_defaults(card);
        return 0;
 }
 
index 7e68d9d16859d24eaae38d3f0a8079f40cbabdf2..53712cf2640659cb0da642ba1dc05bb03e6567ec 100644 (file)
@@ -2096,8 +2096,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 
 tx_drop:
        QETH_TXQ_STAT_INC(queue, tx_dropped);
-       QETH_TXQ_STAT_INC(queue, tx_errors);
-       dev_kfree_skb_any(skb);
+       kfree_skb(skb);
        netif_wake_queue(dev);
        return NETDEV_TX_OK;
 }
@@ -2253,14 +2252,15 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
        int rc;
 
+       hash_init(card->ip_htable);
+
        if (gdev->dev.type == &qeth_generic_devtype) {
                rc = qeth_l3_create_device_attributes(&gdev->dev);
                if (rc)
                        return rc;
        }
-       hash_init(card->ip_htable);
+
        hash_init(card->ip_mc_htable);
-       card->info.hwtrap = 0;
        return 0;
 }
 
index 744a64680d5b0d16c982012bfe2b351becd54a9a..e8fc28dba8dfc3521532c3c87d26d199b8ed9b6c 100644 (file)
@@ -624,6 +624,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
        add_timer(&erp_action->timer);
 }
 
+void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+                                    int clear, char *dbftag)
+{
+       unsigned long flags;
+       struct zfcp_port *port;
+
+       write_lock_irqsave(&adapter->erp_lock, flags);
+       read_lock(&adapter->port_list_lock);
+       list_for_each_entry(port, &adapter->port_list, list)
+               _zfcp_erp_port_forced_reopen(port, clear, dbftag);
+       read_unlock(&adapter->port_list_lock);
+       write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
 static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
                                      int clear, char *dbftag)
 {
@@ -1341,6 +1355,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
                struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
                int lun_status;
 
+               if (sdev->sdev_state == SDEV_DEL ||
+                   sdev->sdev_state == SDEV_CANCEL)
+                       continue;
                if (zsdev->port != port)
                        continue;
                /* LUN under port of interest */
index 3fce47b0b21b55142a64bb3b838bf28168ddd89e..c6acca521ffec71ee7b3f7e7231a32b18fdceff7 100644 (file)
@@ -70,6 +70,8 @@ extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
                                 char *dbftag);
 extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
 extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+                                           int clear, char *dbftag);
 extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
 extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
 extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
index db00b5e3abbe361143c83dc5d6becfaa0e62aac0..33eddb02ee300238897f0f9018119717b387fd58 100644 (file)
@@ -239,10 +239,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
        list_for_each_entry(port, &adapter->port_list, list) {
                if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
                        zfcp_fc_test_link(port);
-               if (!port->d_id)
-                       zfcp_erp_port_reopen(port,
-                                            ZFCP_STATUS_COMMON_ERP_FAILED,
-                                            "fcrscn1");
        }
        read_unlock_irqrestore(&adapter->port_list_lock, flags);
 }
@@ -250,6 +246,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
 static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
 {
        struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
+       struct zfcp_adapter *adapter = fsf_req->adapter;
        struct fc_els_rscn *head;
        struct fc_els_rscn_page *page;
        u16 i;
@@ -263,6 +260,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
        no_entries = be16_to_cpu(head->rscn_plen) /
                sizeof(struct fc_els_rscn_page);
 
+       if (no_entries > 1) {
+               /* handle failed ports */
+               unsigned long flags;
+               struct zfcp_port *port;
+
+               read_lock_irqsave(&adapter->port_list_lock, flags);
+               list_for_each_entry(port, &adapter->port_list, list) {
+                       if (port->d_id)
+                               continue;
+                       zfcp_erp_port_reopen(port,
+                                            ZFCP_STATUS_COMMON_ERP_FAILED,
+                                            "fcrscn1");
+               }
+               read_unlock_irqrestore(&adapter->port_list_lock, flags);
+       }
+
        for (i = 1; i < no_entries; i++) {
                /* skip head and start with 1st element */
                page++;
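
A zfcp port whose d_id is zero has lost its fabric address and can never match an RSCN page, so the reopen of failed ports moves out of the per-page loop and runs once per multi-entry RSCN under its own lock section. A minimal sketch of the page-matching test this relies on, using the surrounding zfcp definitions:

    /* illustrative sketch, not part of the patch */
    if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
            zfcp_fc_test_link(port);   /* addressable ports get tested */
    /* ports with d_id == 0 are instead reopened once per RSCN */
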
index f4f6a07c52220234fb0e865ca3f0d87a2d2fdbe0..221d0dfb849329eb5ebf1758004628301b500ba8 100644 (file)
@@ -368,6 +368,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
        struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
        int ret = SUCCESS, fc_ret;
 
+       if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
+               zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
+               zfcp_erp_wait(adapter);
+       }
        zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
        zfcp_erp_wait(adapter);
        fc_ret = fc_block_scsi_eh(scpnt);
index 1df5171594b89dc70087def629200c30b4149d1f..11fb68d7e60de6ed5ab388250691b647cbc030bc 100644 (file)
@@ -2640,9 +2640,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
        return capacity;
 }
 
+static inline int aac_pci_offline(struct aac_dev *dev)
+{
+       return pci_channel_offline(dev->pdev) || dev->handle_pci_error;
+}
+
 static inline int aac_adapter_check_health(struct aac_dev *dev)
 {
-       if (unlikely(pci_channel_offline(dev->pdev)))
+       if (unlikely(aac_pci_offline(dev)))
                return -1;
 
        return (dev)->a_ops.adapter_check_health(dev);
index e67e032936ef015b66c242eaf9c3111cfb3812c2..78430a7b294c6e651024300d86aaec5eecbe53c4 100644 (file)
@@ -672,7 +672,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
                                        return -ETIMEDOUT;
                                }
 
-                               if (unlikely(pci_channel_offline(dev->pdev)))
+                               if (unlikely(aac_pci_offline(dev)))
                                        return -EFAULT;
 
                                if ((blink = aac_adapter_check_health(dev)) > 0) {
@@ -772,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
 
                spin_unlock_irqrestore(&fibptr->event_lock, flags);
 
-               if (unlikely(pci_channel_offline(dev->pdev)))
+               if (unlikely(aac_pci_offline(dev)))
                        return -EFAULT;
 
                fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
index 3d401d02c01955bc02304fe4f761993e73eaad46..bdd177e3d76229bae1fb67003a9045a3f3e26c87 100644 (file)
@@ -91,6 +91,7 @@ aic7770_probe(struct device *dev)
        ahc = ahc_alloc(&aic7xxx_driver_template, name);
        if (ahc == NULL)
                return (ENOMEM);
+       ahc->dev = dev;
        error = aic7770_config(ahc, aic7770_ident_table + edev->id.driver_data,
                               eisaBase);
        if (error != 0) {
index 5614921b4041acf4a10c004646d93fa1aa0ebff6..88b90f9806c99d04cd07fa632e243e5b5528d651 100644 (file)
@@ -943,6 +943,7 @@ struct ahc_softc {
         * Platform specific device information.
         */
        ahc_dev_softc_t           dev_softc;
+       struct device             *dev;
 
        /*
         * Bus specific device information.
index 3c9c17450bb399b0a9885270c2070bfb7fa6b24c..d5c4a0d2370620afe5a0fe3ad39bd44025c14429 100644 (file)
@@ -860,8 +860,8 @@ int
 ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr,
                 int flags, bus_dmamap_t *mapp)
 {
-       *vaddr = pci_alloc_consistent(ahc->dev_softc,
-                                     dmat->maxsize, mapp);
+       /* XXX: check if we really need the GFP_ATOMIC and unwind this mess! */
+       *vaddr = dma_alloc_coherent(ahc->dev, dmat->maxsize, mapp, GFP_ATOMIC);
        if (*vaddr == NULL)
                return ENOMEM;
        return 0;
@@ -871,8 +871,7 @@ void
 ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat,
                void* vaddr, bus_dmamap_t map)
 {
-       pci_free_consistent(ahc->dev_softc, dmat->maxsize,
-                           vaddr, map);
+       dma_free_coherent(ahc->dev, dmat->maxsize, vaddr, map);
 }
 
 int
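
These two hunks swap the legacy PCI DMA wrappers for the generic DMA API, which takes a struct device rather than a pci_dev; that is why the neighbouring hunks introduce ahc->dev. The old wrapper always allocated with GFP_ATOMIC, which the conversion preserves and flags with the XXX note. The correspondence, sketched:

    /* sketch only: legacy wrapper vs. generic DMA API */
    void *buf;
    dma_addr_t handle;

    /* old: buf = pci_alloc_consistent(pdev, size, &handle); */
    buf = dma_alloc_coherent(&pdev->dev, size, &handle, GFP_ATOMIC);

    /* old: pci_free_consistent(pdev, size, buf, handle); */
    dma_free_coherent(&pdev->dev, size, buf, handle);
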
@@ -1123,8 +1122,7 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa
 
        host->transportt = ahc_linux_transport_template;
 
-       retval = scsi_add_host(host,
-                       (ahc->dev_softc ? &ahc->dev_softc->dev : NULL));
+       retval = scsi_add_host(host, ahc->dev);
        if (retval) {
                printk(KERN_WARNING "aic7xxx: scsi_add_host failed\n");
                scsi_host_put(host);
index 0fc14dac7070ce6bab629c08a2998638fc26553e..717d8d1082ce18ae9899870e43238c1443fdb36f 100644 (file)
@@ -250,6 +250,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                }
        }
        ahc->dev_softc = pci;
+       ahc->dev = &pci->dev;
        error = ahc_pci_config(ahc, entry);
        if (error != 0) {
                ahc_free(ahc);
index 462560b2855e25e1204064c89350fed5653bf36a..469d0bc9f5fe4db6e756e4270bf522e63cd15566 100644 (file)
@@ -1713,8 +1713,11 @@ csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
        }
 
 out:
-       if (req->nsge > 0)
+       if (req->nsge > 0) {
                scsi_dma_unmap(cmnd);
+               if (req->dcopy && (host_status == DID_OK))
+                       host_status = csio_scsi_copy_to_sgl(hw, req);
+       }
 
        cmnd->result = (((host_status) << 16) | scsi_status);
        cmnd->scsi_done(cmnd);
index 3c3cf89f713fbfaf8a7c15fbca17120af609fe87..14bac4966c87bf0e8bc601fc3c4e7afb0eb1cec8 100644 (file)
@@ -1801,6 +1801,12 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
        }
        hisi_sas_dereg_device(hisi_hba, device);
 
+       if (dev_is_sata(device)) {
+               rc = hisi_sas_softreset_ata_disk(device);
+               if (rc)
+                       return TMF_RESP_FUNC_FAILED;
+       }
+
        rc = hisi_sas_debug_I_T_nexus_reset(device);
 
        if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
index dbaa4f131433abde497c843c17e2b0e677b4544e..3ad997ac351034bd2e556a17c6f9d14addf2921a 100644 (file)
@@ -139,6 +139,7 @@ static const struct {
        { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
 
        { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
+       { IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
 };
 
 static void ibmvfc_npiv_login(struct ibmvfc_host *);
@@ -1494,9 +1495,9 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
        if (rsp->flags & FCP_RSP_LEN_VALID)
                rsp_code = rsp->data.info.rsp_code;
 
-       scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
+       scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
                    "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
-                   cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
+                   cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
                    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
 }
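
vfc_cmd->status and vfc_cmd->error are big-endian wire values, so printing them raw shows byte-swapped numbers on little-endian hosts; the hunks in this file therefore convert with be16_to_cpu() at every log site. The failure mode, sketched:

    /* sketch: a wire value of 0x0004 logged on a little-endian host */
    __be16 wire = cpu_to_be16(0x0004);

    pr_info("raw=%04x\n", (__force u16)wire);   /* prints 0400 */
    pr_info("cpu=%04x\n", be16_to_cpu(wire));   /* prints 0004 */
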
 
@@ -2022,7 +2023,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
                sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
                            "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
                            ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
-                           rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+                           be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
                            fc_rsp->scsi_status);
                rsp_rc = -EIO;
        } else
@@ -2381,7 +2382,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
                sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
                            "flags: %x fcp_rsp: %x, scsi_status: %x\n",
                            ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
-                           rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+                           be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
                            fc_rsp->scsi_status);
                rsp_rc = -EIO;
        } else
@@ -2755,16 +2756,18 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
                ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
                if (crq->format == IBMVFC_PARTITION_MIGRATED) {
                        /* We need to re-setup the interpartition connection */
-                       dev_info(vhost->dev, "Re-enabling adapter\n");
+                       dev_info(vhost->dev, "Partition migrated, re-enabling adapter\n");
                        vhost->client_migrated = 1;
                        ibmvfc_purge_requests(vhost, DID_REQUEUE);
                        ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
                        ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
-               } else {
-                       dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
+               } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
+                       dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
                        ibmvfc_purge_requests(vhost, DID_ERROR);
                        ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
                        ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
+               } else {
+                       dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
                }
                return;
        case IBMVFC_CRQ_CMD_RSP:
@@ -3348,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
 
                tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
                        ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-                       rsp->status, rsp->error, status);
+                       be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
                break;
        }
 
@@ -3446,9 +3449,10 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
                        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 
                tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
-                       ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), rsp->status, rsp->error,
-                       ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type,
-                       ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status);
+                       ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
+                                            be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+                       ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+                       ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
                break;
        }
 
@@ -3619,7 +3623,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
                fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
                tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
                         ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
-                        mad->iu.status, mad->iu.error,
+                        be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
                         ibmvfc_get_fc_type(fc_reason), fc_reason,
                         ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
                break;
@@ -3831,9 +3835,10 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
 
                tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
                        ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-                       rsp->status, rsp->error, ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)),
-                       rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)),
-                       rsp->fc_explain, status);
+                       be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+                       ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+                       ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
+                       status);
                break;
        }
 
@@ -3959,7 +3964,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
                level += ibmvfc_retry_host_init(vhost);
                ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
                           ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-                          rsp->status, rsp->error);
+                          be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
                break;
        case IBMVFC_MAD_DRIVER_FAILED:
                break;
@@ -4024,7 +4029,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
                        ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
                ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
                           ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-                                               rsp->status, rsp->error);
+                                               be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
                ibmvfc_free_event(evt);
                return;
        case IBMVFC_MAD_CRQ_ERROR:
index b81a53c4a9a8b1020a96a85fd5e84cde2adb9900..459cc288ba1d01abe63c28454bf73c7190bb64a4 100644 (file)
@@ -78,9 +78,14 @@ enum ibmvfc_crq_valid {
        IBMVFC_CRQ_XPORT_EVENT          = 0xFF,
 };
 
-enum ibmvfc_crq_format {
+enum ibmvfc_crq_init_msg {
        IBMVFC_CRQ_INIT                 = 0x01,
        IBMVFC_CRQ_INIT_COMPLETE        = 0x02,
+};
+
+enum ibmvfc_crq_xport_evts {
+       IBMVFC_PARTNER_FAILED           = 0x01,
+       IBMVFC_PARTNER_DEREGISTER       = 0x02,
        IBMVFC_PARTITION_MIGRATED       = 0x06,
 };
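
The old ibmvfc_crq_format enum mixed two unrelated namespaces that happen to share numeric values: whether crq->format holds an init message or a transport event is decided by crq->valid, not by the value itself. How the split enums are meant to be read, sketched (the default case is an assumption from context, not shown in this hunk):

    /* sketch: same numeric format, different meaning per crq->valid */
    switch (crq->valid) {
    case IBMVFC_CRQ_XPORT_EVENT:            /* 0xFF */
            /* crq->format is enum ibmvfc_crq_xport_evts */
            break;
    default:
            /* init messages use enum ibmvfc_crq_init_msg */
            break;
    }
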
 
index 1135e74646e21c6657e5bdd0ad16582f9aca9139..8cec5230fe313fd53557af39b361a044c3eca5f6 100644 (file)
@@ -96,6 +96,7 @@ static int client_reserve = 1;
 static char partition_name[96] = "UNKNOWN";
 static unsigned int partition_number = -1;
 static LIST_HEAD(ibmvscsi_head);
+static DEFINE_SPINLOCK(ibmvscsi_driver_lock);
 
 static struct scsi_transport_template *ibmvscsi_transport_template;
 
@@ -2270,7 +2271,9 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        }
 
        dev_set_drvdata(&vdev->dev, hostdata);
+       spin_lock(&ibmvscsi_driver_lock);
        list_add_tail(&hostdata->host_list, &ibmvscsi_head);
+       spin_unlock(&ibmvscsi_driver_lock);
        return 0;
 
       add_srp_port_failed:
@@ -2292,15 +2295,27 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 static int ibmvscsi_remove(struct vio_dev *vdev)
 {
        struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
-       list_del(&hostdata->host_list);
-       unmap_persist_bufs(hostdata);
+       unsigned long flags;
+
+       srp_remove_host(hostdata->host);
+       scsi_remove_host(hostdata->host);
+
+       purge_requests(hostdata, DID_ERROR);
+
+       spin_lock_irqsave(hostdata->host->host_lock, flags);
        release_event_pool(&hostdata->pool, hostdata);
+       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
        ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
                                        max_events);
 
        kthread_stop(hostdata->work_thread);
-       srp_remove_host(hostdata->host);
-       scsi_remove_host(hostdata->host);
+       unmap_persist_bufs(hostdata);
+
+       spin_lock(&ibmvscsi_driver_lock);
+       list_del(&hostdata->host_list);
+       spin_unlock(&ibmvscsi_driver_lock);
+
        scsi_host_put(hostdata->host);
 
        return 0;
index dfba4921b265a0fa7fdf2409295483a68c5e181c..5bf61431434be73a381fc9d59b6fee3b9441cab7 100644 (file)
@@ -2162,7 +2162,6 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
                FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
                             fc_rport_state(rdata));
 
-               rdata->flags &= ~FC_RP_STARTED;
                fc_rport_enter_delete(rdata, RPORT_EV_STOP);
                mutex_unlock(&rdata->rp_mutex);
                kref_put(&rdata->kref, fc_rport_destroy);
index c98f264f1d83a030ea8a00678fd586ab70059218..a497b2c0cb798e07d240affc72b4621fa1503f34 100644 (file)
@@ -3878,10 +3878,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
         * wake up the thread.
         */
        spin_lock(&lpfc_cmd->buf_lock);
-       if (unlikely(lpfc_cmd->cur_iocbq.iocb_flag & LPFC_DRIVER_ABORTED)) {
-               lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
-               if (lpfc_cmd->waitq)
-                       wake_up(lpfc_cmd->waitq);
+       lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
+       if (lpfc_cmd->waitq) {
+               wake_up(lpfc_cmd->waitq);
                lpfc_cmd->waitq = NULL;
        }
        spin_unlock(&lpfc_cmd->buf_lock);
index e57774472e752013ce762912a6ceec512905fc2a..1d8c584ec1e9197595acf2baa61bccae4305b646 100644 (file)
@@ -3281,12 +3281,18 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 
        if (smid < ioc->hi_priority_smid) {
                struct scsiio_tracker *st;
+               void *request;
 
                st = _get_st_from_smid(ioc, smid);
                if (!st) {
                        _base_recovery_check(ioc);
                        return;
                }
+
+               /* Clear MPI request frame */
+               request = mpt3sas_base_get_msg_frame(ioc, smid);
+               memset(request, 0, ioc->request_sz);
+
                mpt3sas_base_clear_st(ioc, st);
                _base_recovery_check(ioc);
                return;
index 8bb5b8f9f4d2cdbbc127c73cda4d9672b4adcf77..1ccfbc7eebe0323ce88b1c450e52bb87aba3c45e 100644 (file)
@@ -1462,11 +1462,23 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 {
        struct scsi_cmnd *scmd = NULL;
        struct scsiio_tracker *st;
+       Mpi25SCSIIORequest_t *mpi_request;
 
        if (smid > 0  &&
            smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
                u32 unique_tag = smid - 1;
 
+               mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
+               /*
+                * If a SCSI IO request is outstanding at the driver level,
+                * the DevHandle field must be non-zero. If DevHandle is
+                * zero, this smid is free at the driver level, so return
+                * NULL.
+                */
+               if (!mpi_request->DevHandle)
+                       return scmd;
+
                scmd = scsi_host_find_tag(ioc->shost, unique_tag);
                if (scmd) {
                        st = scsi_cmd_priv(scmd);
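
This lookup cooperates with the memset added to mpt3sas_base_free_smid() above: freeing a smid now zeroes its MPI request frame, making a zero DevHandle a reliable free-at-driver-level marker. The protocol, sketched:

    /* sketch of the free/busy protocol assumed by the two hunks */

    /* free path: zero the frame so DevHandle reads back as 0 */
    memset(mpt3sas_base_get_msg_frame(ioc, smid), 0, ioc->request_sz);

    /* lookup path: a zero DevHandle means no outstanding SCSI IO */
    if (!mpi_request->DevHandle)
            return NULL;
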
index e74a62448ba466a58c2365546b5d1fc34bafa8e9..e5db9a9954dc0cd015577686c8bdb5efda9f78de 100644 (file)
@@ -1392,10 +1392,8 @@ static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
 
 static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
 {
-       struct qedi_nvm_iscsi_image nvm_image;
-
        qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev,
-                                              sizeof(nvm_image),
+                                              sizeof(struct qedi_nvm_iscsi_image),
                                               &qedi->nvm_buf_dma, GFP_KERNEL);
        if (!qedi->iscsi_image) {
                QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
@@ -2236,14 +2234,13 @@ static void qedi_boot_release(void *data)
 static int qedi_get_boot_info(struct qedi_ctx *qedi)
 {
        int ret = 1;
-       struct qedi_nvm_iscsi_image nvm_image;
 
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
                  "Get NVM iSCSI CFG image\n");
        ret = qedi_ops->common->nvm_get_image(qedi->cdev,
                                              QED_NVM_IMAGE_ISCSI_CFG,
                                              (char *)qedi->iscsi_image,
-                                             sizeof(nvm_image));
+                                             sizeof(struct qedi_nvm_iscsi_image));
        if (ret)
                QEDI_ERR(&qedi->dbg_ctx,
                         "Could not get NVM image. ret = %d\n", ret);
index 420045155ba042fbf316463fe6c087f0ba032412..0c700b140ce7d943e4df404a493ba2257c886e56 100644 (file)
@@ -4991,6 +4991,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
                if ((domain & 0xf0) == 0xf0)
                        continue;
 
+               /* Bypass if not the same domain and area as the adapter. */
+               if (area && domain && ((area != vha->d_id.b.area) ||
+                   (domain != vha->d_id.b.domain)) &&
+                   (ha->current_topology == ISP_CFG_NL))
+                       continue;
+
                /* Bypass invalid local loop ID. */
                if (loop_id > LAST_LOCAL_LOOP_ID)
                        continue;
index 677f82fdf56fd174c2c033b852431a91ab5b0fe6..91f576d743fe6fa9d2cbd71ba88f80459832d914 100644 (file)
@@ -1517,7 +1517,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
                goto eh_reset_failed;
        }
        err = 2;
-       if (do_reset(fcport, cmd->device->lun, blk_mq_rq_cpu(cmd->request) + 1)
+       if (do_reset(fcport, cmd->device->lun, 1)
                != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x800c,
                    "do_reset failed for cmd=%p.\n", cmd);
index 16a18d5d856f91725b33e25df042eb21bba8c20a..6e4f4931ae175f806731d2fcb1fbb4ba655cc885 100644 (file)
@@ -3203,6 +3203,8 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
        if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
                return -EINVAL;
        ep = iscsi_lookup_endpoint(transport_fd);
+       if (!ep)
+               return -EINVAL;
        conn = cls_conn->dd_data;
        qla_conn = conn->dd_data;
        qla_conn->qla_ep = ep->dd_data;
index c4cbfd07b9167f0e29b635b9b24e65a6df3826d9..a08ff3bd63105141840e774fc0af3081aa78178a 100644 (file)
@@ -238,6 +238,7 @@ static struct {
        {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+       {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
        {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
        {"SONY", "TSL", NULL, BLIST_FORCELUN},          /* DDS3 & DDS4 autoloaders */
index 5a58cbf3a75da9123899ce668934e002933d1416..c14006ac98f91c6bb3c7d360b7bd78720d19df84 100644 (file)
@@ -75,6 +75,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
        {"NETAPP", "INF-01-00",         "rdac", },
        {"LSI", "INF-01-00",            "rdac", },
        {"ENGENIO", "INF-01-00",        "rdac", },
+       {"LENOVO", "DE_Series",         "rdac", },
        {NULL, NULL,                    NULL },
 };
 
index 20189675677a000325b8185b43b2743413c5171d..07dfc17d48246551a63966444172b4197e30def4 100644 (file)
@@ -585,9 +585,16 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
        if (!blk_rq_is_scsi(req)) {
                WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
                cmd->flags &= ~SCMD_INITIALIZED;
-               destroy_rcu_head(&cmd->rcu);
        }
 
+       /*
+        * Calling rcu_barrier() is not necessary here because the
+        * SCSI error handler guarantees that the function called by
+        * call_rcu() has been called before scsi_end_request() is
+        * called.
+        */
+       destroy_rcu_head(&cmd->rcu);
+
        /*
         * In the MQ case the command gets freed by __blk_mq_end_request,
         * so we have to do all cleanup that depends on it earlier.
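
destroy_rcu_head() is a debug-objects annotation that must pair with the init_rcu_head() done at command setup; hoisting it out of the !blk_rq_is_scsi() branch makes the teardown unconditional, matching the unconditional init. The lifetime being checked, sketched (the callback name is hypothetical; the real call_rcu() site lives in the SCSI error handler):

    init_rcu_head(&cmd->rcu);               /* command setup */
    /* ... */
    call_rcu(&cmd->rcu, eh_done_cb);        /* hypothetical EH callback */
    /* EH guarantees the callback finished before scsi_end_request() */
    destroy_rcu_head(&cmd->rcu);            /* now unconditional */
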
@@ -1699,8 +1706,12 @@ out_put_budget:
                        ret = BLK_STS_DEV_RESOURCE;
                break;
        default:
+               if (unlikely(!scsi_device_online(sdev)))
+                       scsi_req(req)->result = DID_NO_CONNECT << 16;
+               else
+                       scsi_req(req)->result = DID_ERROR << 16;
                /*
-                * Make sure to release all allocated ressources when
+                * Make sure to release all allocated resources when
                 * we hit an error, as we will never see this command
                 * again.
                 */
@@ -2541,8 +2552,10 @@ void scsi_device_resume(struct scsi_device *sdev)
         * device deleted during suspend)
         */
        mutex_lock(&sdev->state_mutex);
-       sdev->quiesced_by = NULL;
-       blk_clear_pm_only(sdev->request_queue);
+       if (sdev->quiesced_by) {
+               sdev->quiesced_by = NULL;
+               blk_clear_pm_only(sdev->request_queue);
+       }
        if (sdev->sdev_state == SDEV_QUIESCE)
                scsi_device_set_state(sdev, SDEV_RUNNING);
        mutex_unlock(&sdev->state_mutex);
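
blk_set_pm_only() and blk_clear_pm_only() keep a counter, so clearing it for a device that was never quiesced would unbalance the count. The new guard makes resume idempotent; balanced usage, sketched:

    /* sketch: only a matching quiesce/resume pair touches pm_only */
    scsi_device_quiesce(sdev);      /* sets sdev->quiesced_by, pm_only++ */
    scsi_device_resume(sdev);       /* clears quiesced_by, pm_only-- */
    scsi_device_resume(sdev);       /* quiesced_by already NULL: no-op */
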
index 6a9040faed00c93ba5beeda77ffb5b2cbcb6c07e..3b119ca0cc0ce9ba2cfcc95cf78307a96b1d264b 100644 (file)
@@ -771,6 +771,12 @@ store_state_field(struct device *dev, struct device_attribute *attr,
 
        mutex_lock(&sdev->state_mutex);
        ret = scsi_device_set_state(sdev, state);
+       /*
+        * If the device state changes to SDEV_RUNNING, we need to run
+        * the queue to avoid an I/O hang.
+        */
+       if (ret == 0 && state == SDEV_RUNNING)
+               blk_mq_run_hw_queues(sdev->request_queue, true);
        mutex_unlock(&sdev->state_mutex);
 
        return ret == 0 ? count : -EINVAL;
index 0508831d6fb9cb6d5ff3b90210dcfef71ef465da..0a82e93566dc8516fc0775a8975da3c25453699a 100644 (file)
@@ -2200,6 +2200,8 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
        scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
        /* flush running scans then delete devices */
        flush_work(&session->scan_work);
+       /* flush running unbind operations */
+       flush_work(&session->unbind_work);
        __iscsi_unbind_session(&session->unbind_work);
 
        /* hw iscsi may not have removed all connections from session */
index 251db30d0882dc83556a688798c4f277072edefe..2b2bc4b49d78a36c737cd9e70666b900ec0fc2b2 100644 (file)
@@ -1415,11 +1415,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
                        scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
        }
 
-       /*
-        * XXX and what if there are packets in flight and this close()
-        * XXX is followed by a "rmmod sd_mod"?
-        */
-
        scsi_disk_put(sdkp);
 }
 
@@ -3076,6 +3071,9 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
        unsigned int opt_xfer_bytes =
                logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
 
+       if (sdkp->opt_xfer_blocks == 0)
+               return false;
+
        if (sdkp->opt_xfer_blocks > dev_max) {
                sd_first_printk(KERN_WARNING, sdkp,
                                "Optimal transfer size %u logical blocks " \
@@ -3505,9 +3503,21 @@ static void scsi_disk_release(struct device *dev)
 {
        struct scsi_disk *sdkp = to_scsi_disk(dev);
        struct gendisk *disk = sdkp->disk;
-       
+       struct request_queue *q = disk->queue;
+
        ida_free(&sd_index_ida, sdkp->index);
 
+       /*
+        * Wait until all requests that are in progress have completed.
+        * This is necessary to avoid that e.g. scsi_end_request() crashes
+        * due to clearing the disk->private_data pointer. Wait from inside
+        * scsi_disk_release() instead of from sd_release() to avoid that
+        * freezing and unfreezing the request queue affects user space I/O
+        * in case multiple processes open a /dev/sd... node concurrently.
+        */
+       blk_mq_freeze_queue(q);
+       blk_mq_unfreeze_queue(q);
+
        disk->private_data = NULL;
        put_disk(disk);
        put_device(&sdkp->device->sdev_gendev);
index 84380bae20f1ec350d5209931cdca018351199f1..8472de1007fffca12f41823e3ed8e45dac1ee06d 100644 (file)
@@ -385,7 +385,7 @@ enum storvsc_request_type {
  * This is the end of Protocol specific defines.
  */
 
-static int storvsc_ringbuffer_size = (256 * PAGE_SIZE);
+static int storvsc_ringbuffer_size = (128 * 1024);
 static u32 max_outstanding_req_per_channel;
 
 static int storvsc_vcpus_per_sub_channel = 4;
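
Sizing the ring buffer as 256 * PAGE_SIZE made the default page-size dependent; a fixed byte count keeps it identical (and smaller) on every architecture. The arithmetic, sketched:

    /* sketch: old default varied with PAGE_SIZE, new one does not */
    /* old, 4 KiB pages:   256 * 4096  ==   1 MiB  */
    /* old, 64 KiB pages:  256 * 65536 ==  16 MiB  */
    /* new, everywhere:    128 * 1024  == 128 KiB  */
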
@@ -668,13 +668,22 @@ static void  handle_multichannel_storage(struct hv_device *device, int max_chns)
 {
        struct device *dev = &device->device;
        struct storvsc_device *stor_device;
-       int num_cpus = num_online_cpus();
        int num_sc;
        struct storvsc_cmd_request *request;
        struct vstor_packet *vstor_packet;
        int ret, t;
 
-       num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns);
+       /*
+        * If the number of CPUs is artificially restricted, such as
+        * with maxcpus=1 on the kernel boot line, Hyper-V could offer
+        * sub-channels >= the number of CPUs. These sub-channels
+        * should not be created. The primary channel is already created
+        * and assigned to one CPU, so check against # CPUs - 1.
+        */
+       num_sc = min((int)(num_online_cpus() - 1), max_chns);
+       if (!num_sc)
+               return;
+
        stor_device = get_out_stor_device(device);
        if (!stor_device)
                return;
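
A worked example of the clamp, assuming a maxcpus=1 boot where Hyper-V still offers several sub-channels:

    /* sketch: num_online_cpus() == 1, device offers max_chns == 4 */
    num_sc = min((int)(1 - 1), 4);  /* == 0 */
    if (!num_sc)
            return;         /* primary channel already owns the one CPU */
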
index 8af01777d09c74f344ad325256dbd30248febe32..f8cb7c23305b7e984e16ba94406f1f338cb17a46 100644 (file)
@@ -793,6 +793,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
 
        /* We need to know how many queues before we allocate. */
        num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
+       num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);
 
        num_targets = virtscsi_config_get(vdev, max_target) + 1;
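
The same kind of clamp as in storvsc above: a device can advertise more request queues than the host has possible CPUs, and every extra queue would still cost a virtqueue. Worked example with assumed numbers:

    /* sketch: config space reports 128 queues, nr_cpu_ids == 16 */
    num_queues = min_t(unsigned int, 16, 128);      /* == 16 */
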
 
index 9351349cf0a930cd5c25dedd6bb747970e455e96..1e0041ec813238cbfa7ab52c3fdc9799961169d1 100644 (file)
@@ -150,7 +150,12 @@ struct bcm2835_power {
 
 static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
 {
-       u64 start = ktime_get_ns();
+       u64 start;
+
+       if (!reg)
+               return 0;
+
+       start = ktime_get_ns();
 
        /* Enable the module's async AXI bridges. */
        ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP);
@@ -165,7 +170,12 @@ static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
 
 static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg)
 {
-       u64 start = ktime_get_ns();
+       u64 start;
+
+       if (!reg)
+               return 0;
+
+       start = ktime_get_ns();
 
        /* Disable the module's async AXI bridges. */
        ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP);
@@ -475,7 +485,7 @@ static int bcm2835_power_pd_power_off(struct generic_pm_domain *domain)
        }
 }
 
-static void
+static int
 bcm2835_init_power_domain(struct bcm2835_power *power,
                          int pd_xlate_index, const char *name)
 {
@@ -483,6 +493,17 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
        struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index];
 
        dom->clk = devm_clk_get(dev->parent, name);
+       if (IS_ERR(dom->clk)) {
+               int ret = PTR_ERR(dom->clk);
+
+               if (ret == -EPROBE_DEFER)
+                       return ret;
+
+               /* Some domains don't have a clk, so make sure that we
+                * don't deref an error pointer later.
+                */
+               dom->clk = NULL;
+       }
 
        dom->base.name = name;
        dom->base.power_on = bcm2835_power_pd_power_on;
@@ -495,6 +516,8 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
        pm_genpd_init(&dom->base, NULL, true);
 
        power->pd_xlate.domains[pd_xlate_index] = &dom->base;
+
+       return 0;
 }
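
The error handling above implements an optional-clock pattern: defer the probe if the clock provider is not ready yet, otherwise treat the error as "this domain has no clock". Sketched on its own; the fallback is safe because the clk API accepts a NULL clk as a no-op:

    clk = devm_clk_get(dev, name);
    if (IS_ERR(clk)) {
            if (PTR_ERR(clk) == -EPROBE_DEFER)
                    return -EPROBE_DEFER;   /* provider not ready yet */
            clk = NULL;                     /* domain simply has no clock */
    }
    /* later: clk_prepare_enable(clk) is a no-op when clk == NULL */
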
 
 /** bcm2835_reset_reset - Resets a block that has a reset line in the
@@ -592,7 +615,7 @@ static int bcm2835_power_probe(struct platform_device *pdev)
                { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 },
                { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 },
        };
-       int ret, i;
+       int ret = 0, i;
        u32 id;
 
        power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
@@ -619,8 +642,11 @@ static int bcm2835_power_probe(struct platform_device *pdev)
 
        power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names);
 
-       for (i = 0; i < ARRAY_SIZE(power_domain_names); i++)
-               bcm2835_init_power_domain(power, i, power_domain_names[i]);
+       for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
+               ret = bcm2835_init_power_domain(power, i, power_domain_names[i]);
+               if (ret)
+                       goto fail;
+       }
 
        for (i = 0; i < ARRAY_SIZE(domain_deps); i++) {
                pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base,
@@ -634,12 +660,21 @@ static int bcm2835_power_probe(struct platform_device *pdev)
 
        ret = devm_reset_controller_register(dev, &power->reset);
        if (ret)
-               return ret;
+               goto fail;
 
        of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate);
 
        dev_info(dev, "Broadcom BCM2835 power domains driver");
        return 0;
+
+fail:
+       for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
+               struct generic_pm_domain *dom = &power->domains[i].base;
+
+               if (dom->name)
+                       pm_genpd_remove(dom);
+       }
+       return ret;
 }
 
 static int bcm2835_power_remove(struct platform_device *pdev)
index c0901b96cfe44850f6b6e580d95432b20e4a2d89..62951e836cbc879d1e4be6ba158a8230ebec2c52 100644 (file)
@@ -114,8 +114,6 @@ source "drivers/staging/ralink-gdma/Kconfig"
 
 source "drivers/staging/mt7621-mmc/Kconfig"
 
-source "drivers/staging/mt7621-eth/Kconfig"
-
 source "drivers/staging/mt7621-dts/Kconfig"
 
 source "drivers/staging/gasket/Kconfig"
index 57c6bce13ff4bff0c3487315835c2e1d6b80832e..d1b17ddcd354de10c68455bc64e802c101a59e7d 100644 (file)
@@ -47,7 +47,6 @@ obj-$(CONFIG_SPI_MT7621)      += mt7621-spi/
 obj-$(CONFIG_SOC_MT7621)       += mt7621-dma/
 obj-$(CONFIG_DMA_RALINK)       += ralink-gdma/
 obj-$(CONFIG_MTK_MMC)          += mt7621-mmc/
-obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mt7621-eth/
 obj-$(CONFIG_SOC_MT7621)       += mt7621-dts/
 obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
 obj-$(CONFIG_XIL_AXIS_FIFO)    += axis-fifo/
index 687537203d9cfba144fba1dc846d3cc410776b08..d9725888af6fc34045806fbb5c91ca372b7c9a46 100644 (file)
@@ -3,6 +3,7 @@
 #
 config XIL_AXIS_FIFO
        tristate "Xilinx AXI-Stream FIFO IP core driver"
+       depends on OF
        default n
        help
          This adds support for the Xilinx AXI-Stream
index a7d569cfca5db6b613e31d54eecb48e08f96413e..0dff1ac057cdeb0185cc67357213707aadf5c0cf 100644 (file)
@@ -1001,6 +1001,8 @@ int comedi_dio_insn_config(struct comedi_device *dev,
                           unsigned int mask);
 unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
                                     unsigned int *data);
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+                                      struct comedi_cmd *cmd);
 unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
 unsigned int comedi_nscans_left(struct comedi_subdevice *s,
                                unsigned int nscans);
index eefa62f42c0f06d8b84e03379c0499d7b66d8ace..5a32b8fc000e3df08409028c9ffa5ff979d4efec 100644 (file)
@@ -394,11 +394,13 @@ unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
 EXPORT_SYMBOL_GPL(comedi_dio_update_state);
 
 /**
- * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
+ * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in
+ * bytes
  * @s: COMEDI subdevice.
+ * @cmd: COMEDI command.
  *
  * Determines the overall scan length according to the subdevice type and the
- * number of channels in the scan.
+ * number of channels in the scan for the specified command.
  *
  * For digital input, output or input/output subdevices, samples for
  * multiple channels are assumed to be packed into one or more unsigned
@@ -408,9 +410,9 @@ EXPORT_SYMBOL_GPL(comedi_dio_update_state);
  *
  * Returns the overall scan length in bytes.
  */
-unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+                                      struct comedi_cmd *cmd)
 {
-       struct comedi_cmd *cmd = &s->async->cmd;
        unsigned int num_samples;
        unsigned int bits_per_sample;
 
@@ -427,6 +429,29 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
        }
        return comedi_samples_to_bytes(s, num_samples);
 }
+EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
+
+/**
+ * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
+ * @s: COMEDI subdevice.
+ *
+ * Determines the overall scan length according to the subdevice type and the
+ * number of channels in the scan for the current command.
+ *
+ * For digital input, output or input/output subdevices, samples for
+ * multiple channels are assumed to be packed into one or more unsigned
+ * short or unsigned int values according to the subdevice's %SDF_LSAMPL
+ * flag.  For other types of subdevice, samples are assumed to occupy a
+ * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag.
+ *
+ * Returns the overall scan length in bytes.
+ */
+unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
+{
+       struct comedi_cmd *cmd = &s->async->cmd;
+
+       return comedi_bytes_per_scan_cmd(s, cmd);
+}
 EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
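
Splitting out comedi_bytes_per_scan_cmd() lets a (*do_cmdtest) path size a candidate command before it is installed as s->async->cmd; the ni_cdio_cmdtest hunk below uses exactly that to guard a division. Sketched usage:

    /* sketch: validate a command that is not yet the current one */
    unsigned int n = comedi_bytes_per_scan_cmd(s, cmd);

    if (n)  /* avoid dividing prealloc_bufsz by zero */
            err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
                                    s->async->prealloc_bufsz / n);
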
 
 static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
index 5edf59ac6706d3b5cd7d23d0f945895dc1cb8f48..b04dad8c70927a0aa52229393c063adce4b32e37 100644 (file)
@@ -3545,6 +3545,7 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
                           struct comedi_subdevice *s, struct comedi_cmd *cmd)
 {
        struct ni_private *devpriv = dev->private;
+       unsigned int bytes_per_scan;
        int err = 0;
 
        /* Step 1 : check if triggers are trivially valid */
@@ -3579,9 +3580,12 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
        err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
        err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
                                           cmd->chanlist_len);
-       err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
-                                           s->async->prealloc_bufsz /
-                                           comedi_bytes_per_scan(s));
+       bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd);
+       if (bytes_per_scan) {
+               err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
+                                                   s->async->prealloc_bufsz /
+                                                   bytes_per_scan);
+       }
 
        if (err)
                return 3;
index 808ed92ed66fe4bedfbbba500452d86771e8162e..1bb1cb6513491b805075456e41c45a253e77c83a 100644 (file)
@@ -463,10 +463,8 @@ static int ni6501_alloc_usb_buffers(struct comedi_device *dev)
 
        size = usb_endpoint_maxp(devpriv->ep_tx);
        devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
-       if (!devpriv->usb_tx_buf) {
-               kfree(devpriv->usb_rx_buf);
+       if (!devpriv->usb_tx_buf)
                return -ENOMEM;
-       }
 
        return 0;
 }
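
The removed kfree() here, and the identical one in the vmk80xx hunk below, cures a double free: the rx buffer is also released by the driver's detach path, so freeing it on the tx-allocation error path meant the same pointer was freed twice once detach ran. The hazard, sketched:

    /* sketch of the old bug */
    devpriv->usb_rx_buf = kzalloc(size, GFP_KERNEL);
    devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
    if (!devpriv->usb_tx_buf) {
            kfree(devpriv->usb_rx_buf);     /* freed here ...            */
            return -ENOMEM;
    }
    /* ... and freed again by the detach path: double free */
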
@@ -518,6 +516,9 @@ static int ni6501_auto_attach(struct comedi_device *dev,
        if (!devpriv)
                return -ENOMEM;
 
+       mutex_init(&devpriv->mut);
+       usb_set_intfdata(intf, devpriv);
+
        ret = ni6501_find_endpoints(dev);
        if (ret)
                return ret;
@@ -526,9 +527,6 @@ static int ni6501_auto_attach(struct comedi_device *dev,
        if (ret)
                return ret;
 
-       mutex_init(&devpriv->mut);
-       usb_set_intfdata(intf, devpriv);
-
        ret = comedi_alloc_subdevices(dev, 2);
        if (ret)
                return ret;
index 6234b649d887ccb3abac4c73dcb38aa095768600..65dc6c51037e30edf30b1ad7e0d6eea7c0390e86 100644 (file)
@@ -682,10 +682,8 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
 
        size = usb_endpoint_maxp(devpriv->ep_tx);
        devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
-       if (!devpriv->usb_tx_buf) {
-               kfree(devpriv->usb_rx_buf);
+       if (!devpriv->usb_tx_buf)
                return -ENOMEM;
-       }
 
        return 0;
 }
@@ -800,6 +798,8 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
 
        devpriv->model = board->model;
 
+       sema_init(&devpriv->limit_sem, 8);
+
        ret = vmk80xx_find_usb_endpoints(dev);
        if (ret)
                return ret;
@@ -808,8 +808,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
        if (ret)
                return ret;
 
-       sema_init(&devpriv->limit_sem, 8);
-
        usb_set_intfdata(intf, devpriv);
 
        if (devpriv->model == VMK8055_MODEL)
index 526e0dbea5b5714618b463cb3eab98b0895e99f6..81af768e7248e514699541552e2eb2cd99e1bc5e 100644 (file)
@@ -298,7 +298,7 @@ submit_bio_retry:
        *last_block = current_block;
 
        /* shift in advance in case of it followed by too many gaps */
-       if (unlikely(bio->bi_vcnt >= bio->bi_max_vecs)) {
+       if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
                /* err should reassign to 0 after submitting */
                err = 0;
                goto submit_bio_out;
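
bi_vcnt counts bio vectors, and once multi-page bvecs can merge physically contiguous pages it no longer tracks how full a bio is: many pages may share one vector. Accounting in bytes keeps the early-submit heuristic meaningful; worked example with assumed numbers:

    /* sketch: 256 contiguous 4 KiB pages merged into a single bvec */
    /* old:  bi_vcnt == 1  <  bi_max_vecs == 256  -> never fires */
    /* new:  bi_size == 1 MiB >= 256 * 4096       -> fires       */
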
index 829f7b12e0dcf4aa3ee34a5315f6d5f52b7087ec..9bbc68729c11052018c26335d6b88498e3491b32 100644 (file)
@@ -23,6 +23,21 @@ static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = {
        [EROFS_FT_SYMLINK]      = DT_LNK,
 };
 
+static void debug_one_dentry(unsigned char d_type, const char *de_name,
+                            unsigned int de_namelen)
+{
+#ifdef CONFIG_EROFS_FS_DEBUG
+       /* the on-disk name may lack a trailing '\0', so terminate it here */
+       unsigned char dbg_namebuf[EROFS_NAME_LEN + 1];
+
+       memcpy(dbg_namebuf, de_name, de_namelen);
+       dbg_namebuf[de_namelen] = '\0';
+
+       debugln("found dirent %s de_len %u d_type %d", dbg_namebuf,
+               de_namelen, d_type);
+#endif
+}
+
 static int erofs_fill_dentries(struct dir_context *ctx,
                               void *dentry_blk, unsigned int *ofs,
                               unsigned int nameoff, unsigned int maxsize)
@@ -33,14 +48,10 @@ static int erofs_fill_dentries(struct dir_context *ctx,
        de = dentry_blk + *ofs;
        while (de < end) {
                const char *de_name;
-               int de_namelen;
+               unsigned int de_namelen;
                unsigned char d_type;
-#ifdef CONFIG_EROFS_FS_DEBUG
-               unsigned int dbg_namelen;
-               unsigned char dbg_namebuf[EROFS_NAME_LEN];
-#endif
 
-               if (unlikely(de->file_type < EROFS_FT_MAX))
+               if (de->file_type < EROFS_FT_MAX)
                        d_type = erofs_filetype_table[de->file_type];
                else
                        d_type = DT_UNKNOWN;
@@ -48,26 +59,20 @@ static int erofs_fill_dentries(struct dir_context *ctx,
                nameoff = le16_to_cpu(de->nameoff);
                de_name = (char *)dentry_blk + nameoff;
 
-               de_namelen = unlikely(de + 1 >= end) ?
-                       /* last directory entry */
-                       strnlen(de_name, maxsize - nameoff) :
-                       le16_to_cpu(de[1].nameoff) - nameoff;
+               /* the last dirent in the block? */
+               if (de + 1 >= end)
+                       de_namelen = strnlen(de_name, maxsize - nameoff);
+               else
+                       de_namelen = le16_to_cpu(de[1].nameoff) - nameoff;
 
                /* a corrupted entry is found */
-               if (unlikely(de_namelen < 0)) {
+               if (unlikely(nameoff + de_namelen > maxsize ||
+                            de_namelen > EROFS_NAME_LEN)) {
                        DBG_BUGON(1);
                        return -EIO;
                }
 
-#ifdef CONFIG_EROFS_FS_DEBUG
-               dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
-               memcpy(dbg_namebuf, de_name, dbg_namelen);
-               dbg_namebuf[dbg_namelen] = '\0';
-
-               debugln("%s, found de_name %s de_len %d d_type %d", __func__,
-                       dbg_namebuf, de_namelen, d_type);
-#endif
-
+               debug_one_dentry(d_type, de_name, de_namelen);
                if (!dir_emit(ctx, de_name, de_namelen,
                              le64_to_cpu(de->nid), d_type))
                        /* stopped by some reason */
index 8715bc50e09c16d44ece32baa474eb7d9bc5ab8f..31eef839577436709b1a5261507aff59bcf821d7 100644 (file)
@@ -972,6 +972,7 @@ repeat:
        overlapped = false;
        compressed_pages = grp->compressed_pages;
 
+       err = 0;
        for (i = 0; i < clusterpages; ++i) {
                unsigned int pagenr;
 
@@ -981,26 +982,39 @@ repeat:
                DBG_BUGON(!page);
                DBG_BUGON(!page->mapping);
 
-               if (z_erofs_is_stagingpage(page))
-                       continue;
+               if (!z_erofs_is_stagingpage(page)) {
 #ifdef EROFS_FS_HAS_MANAGED_CACHE
-               if (page->mapping == MNGD_MAPPING(sbi)) {
-                       DBG_BUGON(!PageUptodate(page));
-                       continue;
-               }
+                       if (page->mapping == MNGD_MAPPING(sbi)) {
+                               if (unlikely(!PageUptodate(page)))
+                                       err = -EIO;
+                               continue;
+                       }
 #endif
 
-               /* only non-head page could be reused as a compressed page */
-               pagenr = z_erofs_onlinepage_index(page);
+                       /*
+                        * only a non-head page can be selected
+                        * for inplace decompression
+                        */
+                       pagenr = z_erofs_onlinepage_index(page);
 
-               DBG_BUGON(pagenr >= nr_pages);
-               DBG_BUGON(pages[pagenr]);
-               ++sparsemem_pages;
-               pages[pagenr] = page;
+                       DBG_BUGON(pagenr >= nr_pages);
+                       DBG_BUGON(pages[pagenr]);
+                       ++sparsemem_pages;
+                       pages[pagenr] = page;
 
-               overlapped = true;
+                       overlapped = true;
+               }
+
+               /* PG_error needs checking for inplaced and staging pages */
+               if (unlikely(PageError(page))) {
+                       DBG_BUGON(PageUptodate(page));
+                       err = -EIO;
+               }
        }
 
+       if (unlikely(err))
+               goto out;
+
        llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
 
        if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
@@ -1029,6 +1043,10 @@ repeat:
 
 skip_allocpage:
        vout = erofs_vmap(pages, nr_pages);
+       if (!vout) {
+               err = -ENOMEM;
+               goto out;
+       }
 
        err = z_erofs_vle_unzip_vmap(compressed_pages,
                clusterpages, vout, llen, work->pageofs, overlapped);
@@ -1194,6 +1212,7 @@ repeat:
        if (page->mapping == mc) {
                WRITE_ONCE(grp->compressed_pages[nr], page);
 
+               ClearPageError(page);
                if (!PagePrivate(page)) {
                        /*
                         * impossible to be !PagePrivate(page) for
index 48b263a2731aad2edd28f19f9df3fcc8e461bc1c..0daac9b984a8ec82207ca4e53da9a201a4204707 100644 (file)
@@ -136,10 +136,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
 
        nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);
 
-       if (clusterpages == 1)
+       if (clusterpages == 1) {
                vin = kmap_atomic(compressed_pages[0]);
-       else
+       } else {
                vin = erofs_vmap(compressed_pages, clusterpages);
+               if (!vin)
+                       return -ENOMEM;
+       }
 
        preempt_disable();
        vout = erofs_pcpubuf[smp_processor_id()].data;
index acdbc07fd2592c03084a0c6fb6e89aee073f58fd..2fc8bc22b57baa39a3d4a8cd56ae0f3d2d0a0af2 100644 (file)
 #define AD7192_CH_AIN3         BIT(6) /* AIN3 - AINCOM */
 #define AD7192_CH_AIN4         BIT(7) /* AIN4 - AINCOM */
 
-#define AD7193_CH_AIN1P_AIN2M  0x000  /* AIN1(+) - AIN2(-) */
-#define AD7193_CH_AIN3P_AIN4M  0x001  /* AIN3(+) - AIN4(-) */
-#define AD7193_CH_AIN5P_AIN6M  0x002  /* AIN5(+) - AIN6(-) */
-#define AD7193_CH_AIN7P_AIN8M  0x004  /* AIN7(+) - AIN8(-) */
+#define AD7193_CH_AIN1P_AIN2M  0x001  /* AIN1(+) - AIN2(-) */
+#define AD7193_CH_AIN3P_AIN4M  0x002  /* AIN3(+) - AIN4(-) */
+#define AD7193_CH_AIN5P_AIN6M  0x004  /* AIN5(+) - AIN6(-) */
+#define AD7193_CH_AIN7P_AIN8M  0x008  /* AIN7(+) - AIN8(-) */
 #define AD7193_CH_TEMP         0x100 /* Temp sensor */
 #define AD7193_CH_AIN2P_AIN2M  0x200 /* AIN2(+) - AIN2(-) */
 #define AD7193_CH_AIN1         0x401 /* AIN1 - AINCOM */
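
The differential channel selectors are one-hot bits in the channel field, and the old values were each one bit position short (0x000 selected no channel at all). The corrected values, restated with the BIT() helper to make the encoding explicit:

    /* sketch: equivalent one-hot encoding of the fixed values */
    #define AD7193_CH_AIN1P_AIN2M   BIT(0)  /* 0x001 */
    #define AD7193_CH_AIN3P_AIN4M   BIT(1)  /* 0x002 */
    #define AD7193_CH_AIN5P_AIN6M   BIT(2)  /* 0x004 */
    #define AD7193_CH_AIN7P_AIN8M   BIT(3)  /* 0x008 */
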
index 029c3bf42d4d942f2e58c81cfb03292fc8eae0d3..07774c000c5a68db9f7f6c1e93eae1840fac23ec 100644 (file)
@@ -269,7 +269,7 @@ static IIO_DEV_ATTR_VPEAK(0644,
 static IIO_DEV_ATTR_IPEAK(0644,
                ade7854_read_32bit,
                ade7854_write_32bit,
-               ADE7854_VPEAK);
+               ADE7854_IPEAK);
 static IIO_DEV_ATTR_APHCAL(0644,
                ade7854_read_16bit,
                ade7854_write_16bit,
index 18936cdb10830ae4506435377a3342bb0c2e076e..956daf8c3bd24f9b1ccce2a254c26ea5e75e7ba9 100644 (file)
@@ -1431,7 +1431,7 @@ int most_register_interface(struct most_interface *iface)
 
        INIT_LIST_HEAD(&iface->p->channel_list);
        iface->p->dev_id = id;
-       snprintf(iface->p->name, STRING_SIZE, "mdev%d", id);
+       strcpy(iface->p->name, iface->description);
        iface->dev.init_name = iface->p->name;
        iface->dev.bus = &mc.bus;
        iface->dev.parent = &mc.dev;
index b733855402168efffa6a8a192a3d21e5d3e0ff60..250c15ace2a71147be4e6f14e522b6a3e898745e 100644 (file)
        status = "okay";
 };
 
-&ethernet {
-       //mtd-mac-address = <&factory 0xe000>;
-       gmac1: mac@0 {
-               compatible = "mediatek,eth-mac";
-               reg = <0>;
-               phy-handle = <&phy1>;
-       };
-
-       mdio-bus {
-               phy1: ethernet-phy@1 {
-                       reg = <1>;
-                       phy-mode = "rgmii";
-               };
-       };
-};
-
 &pinctrl {
        state_default: pinctrl0 {
                gpio {
                };
        };
 };
+
+&switch0 {
+       ports {
+               port@0 {
+                       label = "ethblack";
+                       status = "ok";
+               };
+               port@4 {
+                       label = "ethblue";
+                       status = "ok";
+               };
+       };
+};
index 6aff3680ce4b6b4574247ffc564b2fe922e5a544..17020e24abd294055b321985c6806904eb1e8e81 100644 (file)
 
                mediatek,ethsys = <&ethsys>;
 
-               mediatek,switch = <&gsw>;
 
+               gmac0: mac@0 {
+                       compatible = "mediatek,eth-mac";
+                       reg = <0>;
+                       phy-mode = "rgmii";
+                       fixed-link {
+                               speed = <1000>;
+                               full-duplex;
+                               pause;
+                       };
+               };
+               gmac1: mac@1 {
+                       compatible = "mediatek,eth-mac";
+                       reg = <1>;
+                       status = "off";
+                       phy-mode = "rgmii";
+                       phy-handle = <&phy5>;
+               };
                mdio-bus {
                        #address-cells = <1>;
                        #size-cells = <0>;
 
-                       phy1f: ethernet-phy@1f {
-                               reg = <0x1f>;
+                       phy5: ethernet-phy@5 {
+                               reg = <5>;
                                phy-mode = "rgmii";
                        };
+
+                       switch0: switch0@0 {
+                               compatible = "mediatek,mt7621";
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               reg = <0>;
+                               mediatek,mcm;
+                               resets = <&rstctrl 2>;
+                               reset-names = "mcm";
+
+                               ports {
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
+                                       reg = <0>;
+                                       port@0 {
+                                               status = "off";
+                                               reg = <0>;
+                                               label = "lan0";
+                                       };
+                                       port@1 {
+                                               status = "off";
+                                               reg = <1>;
+                                               label = "lan1";
+                                       };
+                                       port@2 {
+                                               status = "off";
+                                               reg = <2>;
+                                               label = "lan2";
+                                       };
+                                       port@3 {
+                                               status = "off";
+                                               reg = <3>;
+                                               label = "lan3";
+                                       };
+                                       port@4 {
+                                               status = "off";
+                                               reg = <4>;
+                                               label = "lan4";
+                                       };
+                                       port@6 {
+                                               reg = <6>;
+                                               label = "cpu";
+                                               ethernet = <&gmac0>;
+                                               phy-mode = "trgmii";
+                                               fixed-link {
+                                                       speed = <1000>;
+                                                       full-duplex;
+                                               };
+                                       };
+                               };
+                       };
                };
        };
 
diff --git a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt b/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt
deleted file mode 100644 (file)
index 596b385..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-Mediatek Gigabit Switch
-=======================
-
-The mediatek gigabit switch can be found on Mediatek SoCs.
-
-Required properties:
-- compatible: Should be "mediatek,mt7620-gsw", "mediatek,mt7621-gsw",
-  "mediatek,mt7623-gsw"
-- reg: Address and length of the register set for the device
-- interrupts: Should contain the gigabit switches interrupt
-
-
-Additional required properties for ARM based SoCs:
-- mediatek,reset-pin: phandle describing the reset GPIO
-- clocks: the clocks used by the switch
-- clock-names: the names of the clocks listed in the clocks property;
-  these should be "trgpll", "esw", "gp2", "gp1"
-- mt7530-supply: the phandle of the regulator used to power the switch
-- mediatek,pctl-regmap: phandle to the port control regmap. This is used to
-  set up the drive current
-
-
-Optional properties:
-- interrupt-parent: Should be the phandle for the interrupt controller
-  that services interrupts for this device
-
-Example:
-
-gsw: switch@1b100000 {
-       compatible = "mediatek,mt7623-gsw";
-       reg = <0 0x1b110000 0 0x300000>;
-
-       interrupt-parent = <&pio>;
-       interrupts = <168 IRQ_TYPE_EDGE_RISING>;
-
-       clocks = <&apmixedsys CLK_APMIXED_TRGPLL>,
-                <&ethsys CLK_ETHSYS_ESW>,
-                <&ethsys CLK_ETHSYS_GP2>,
-                <&ethsys CLK_ETHSYS_GP1>;
-       clock-names = "trgpll", "esw", "gp2", "gp1";
-
-       mt7530-supply = <&mt6323_vpa_reg>;
-
-       mediatek,pctl-regmap = <&syscfg_pctl_a>;
-       mediatek,reset-pin = <&pio 15 0>;
-
-       status = "okay";
-};
diff --git a/drivers/staging/mt7621-eth/Kconfig b/drivers/staging/mt7621-eth/Kconfig
deleted file mode 100644 (file)
index 44ea86c..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-config NET_VENDOR_MEDIATEK_STAGING
-       bool "MediaTek ethernet driver - staging version"
-       depends on RALINK
-       ---help---
-         If you have an MT7621 MediaTek SoC with ethernet, say Y.
-
-if NET_VENDOR_MEDIATEK_STAGING
-choice
-       prompt "MAC type"
-
-config NET_MEDIATEK_MT7621
-       bool "MT7621"
-       depends on MIPS && SOC_MT7621
-
-endchoice
-
-config NET_MEDIATEK_SOC_STAGING
-       tristate "MediaTek SoC Gigabit Ethernet support"
-       depends on NET_VENDOR_MEDIATEK_STAGING
-       select PHYLIB
-       ---help---
-         This driver supports the gigabit ethernet MACs in the
-         MediaTek SoC family.
-
-config NET_MEDIATEK_MDIO
-       def_bool NET_MEDIATEK_SOC_STAGING
-       depends on NET_MEDIATEK_MT7621
-       select PHYLIB
-
-config NET_MEDIATEK_MDIO_MT7620
-       def_bool NET_MEDIATEK_SOC_STAGING
-       depends on NET_MEDIATEK_MT7621
-       select NET_MEDIATEK_MDIO
-
-config NET_MEDIATEK_GSW_MT7621
-       def_tristate NET_MEDIATEK_SOC_STAGING
-       depends on NET_MEDIATEK_MT7621
-
-endif #NET_VENDOR_MEDIATEK_STAGING
diff --git a/drivers/staging/mt7621-eth/Makefile b/drivers/staging/mt7621-eth/Makefile
deleted file mode 100644 (file)
index 018bcc3..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Makefile for the Ralink SoCs' built-in ethernet MACs
-#
-
-mtk-eth-soc-y                                  += mtk_eth_soc.o ethtool.o
-
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO)                += mdio.o
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO_MT7620) += mdio_mt7620.o
-
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MT7621)      += soc_mt7621.o
-
-obj-$(CONFIG_NET_MEDIATEK_GSW_MT7621)          += gsw_mt7621.o
-
-obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING)         += mtk-eth-soc.o
diff --git a/drivers/staging/mt7621-eth/TODO b/drivers/staging/mt7621-eth/TODO
deleted file mode 100644 (file)
index f9e47d4..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-
-- verify devicetree documentation is consistent with code
-- fix ethtool - it currently doesn't return valid data.
-- general code review and clean up
-- add support for the second MAC on mt7621
-- convert gsw code to use switchdev interfaces
-- _mt7620_mii_write() etc. should probably be wrapped
-  in a regmap abstraction (a sketch follows this file).
-- get soc_mt7621 to work with QDMA TX if possible.
-- ensure PHYs are correctly configured when a cable
-  is plugged in.
-
-Cc: NeilBrown <neil@brown.name>
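
A regmap-based replacement for the raw switch accessors, as suggested by the
TODO item above, could look roughly like the sketch below. This is
illustrative only: the gsw->map field is made up here, and the regmap_config
values are assumptions based on the 32-bit, word-aligned switch registers.

#include <linux/regmap.h>

static const struct regmap_config gsw_regmap_config = {
	.reg_bits   = 32,	/* register offsets are 32-bit */
	.val_bits   = 32,	/* all switch registers are 32 bits wide */
	.reg_stride = 4,	/* registers are word-aligned */
};

/* in probe, once gsw->base has been ioremapped: */
gsw->map = devm_regmap_init_mmio(&pdev->dev, gsw->base, &gsw_regmap_config);
if (IS_ERR(gsw->map))
	return PTR_ERR(gsw->map);

/* mtk_switch_w32()/mtk_switch_r32() then reduce to: */
regmap_write(gsw->map, reg, val);
regmap_read(gsw->map, reg, &val);
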
diff --git a/drivers/staging/mt7621-eth/ethtool.c b/drivers/staging/mt7621-eth/ethtool.c
deleted file mode 100644 (file)
index 8c4228e..0000000
+++ /dev/null
@@ -1,250 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include "mtk_eth_soc.h"
-#include "ethtool.h"
-
-struct mtk_stat {
-       char name[ETH_GSTRING_LEN];
-       unsigned int idx;
-};
-
-#define MTK_HW_STAT(stat) { \
-       .name = #stat, \
-       .idx = offsetof(struct mtk_hw_stats, stat) / sizeof(u64) \
-}
-
-static const struct mtk_stat mtk_ethtool_hw_stats[] = {
-       MTK_HW_STAT(tx_bytes),
-       MTK_HW_STAT(tx_packets),
-       MTK_HW_STAT(tx_skip),
-       MTK_HW_STAT(tx_collisions),
-       MTK_HW_STAT(rx_bytes),
-       MTK_HW_STAT(rx_packets),
-       MTK_HW_STAT(rx_overflow),
-       MTK_HW_STAT(rx_fcs_errors),
-       MTK_HW_STAT(rx_short_errors),
-       MTK_HW_STAT(rx_long_errors),
-       MTK_HW_STAT(rx_checksum_errors),
-       MTK_HW_STAT(rx_flow_control_packets),
-};
-
-#define MTK_HW_STATS_LEN       ARRAY_SIZE(mtk_ethtool_hw_stats)
-
-static int mtk_get_link_ksettings(struct net_device *dev,
-                                 struct ethtool_link_ksettings *cmd)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       int err;
-
-       if (!mac->phy_dev)
-               return -ENODEV;
-
-       if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) {
-               err = phy_read_status(mac->phy_dev);
-               if (err)
-                       return -ENODEV;
-       }
-
-       phy_ethtool_ksettings_get(mac->phy_dev, cmd);
-       return 0;
-}
-
-static int mtk_set_link_ksettings(struct net_device *dev,
-                                 const struct ethtool_link_ksettings *cmd)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       if (!mac->phy_dev)
-               return -ENODEV;
-
-       if (cmd->base.phy_address != mac->phy_dev->mdio.addr) {
-               if (mac->hw->phy->phy_node[cmd->base.phy_address]) {
-                       mac->phy_dev = mac->hw->phy->phy[cmd->base.phy_address];
-                       mac->phy_flags = MTK_PHY_FLAG_PORT;
-               } else if (mac->hw->mii_bus) {
-                       mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
-                                                      cmd->base.phy_address);
-                       if (!mac->phy_dev)
-                               return -ENODEV;
-                       mac->phy_flags = MTK_PHY_FLAG_ATTACH;
-               } else {
-                       return -ENODEV;
-               }
-       }
-
-       return phy_ethtool_ksettings_set(mac->phy_dev, cmd);
-}
-
-static void mtk_get_drvinfo(struct net_device *dev,
-                           struct ethtool_drvinfo *info)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_soc_data *soc = mac->hw->soc;
-
-       strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
-       strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
-
-       if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE])
-               info->n_stats = MTK_HW_STATS_LEN;
-}
-
-static u32 mtk_get_msglevel(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       return mac->hw->msg_enable;
-}
-
-static void mtk_set_msglevel(struct net_device *dev, u32 value)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       mac->hw->msg_enable = value;
-}
-
-static int mtk_nway_reset(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       if (!mac->phy_dev)
-               return -EOPNOTSUPP;
-
-       return genphy_restart_aneg(mac->phy_dev);
-}
-
-static u32 mtk_get_link(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       int err;
-
-       if (!mac->phy_dev)
-               goto out_get_link;
-
-       if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) {
-               err = genphy_update_link(mac->phy_dev);
-               if (err)
-                       goto out_get_link;
-       }
-
-       return mac->phy_dev->link;
-
-out_get_link:
-       return ethtool_op_get_link(dev);
-}
-
-static int mtk_set_ringparam(struct net_device *dev,
-                            struct ethtool_ringparam *ring)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       if ((ring->tx_pending < 2) ||
-           (ring->rx_pending < 2) ||
-           (ring->rx_pending > mac->hw->soc->dma_ring_size) ||
-           (ring->tx_pending > mac->hw->soc->dma_ring_size))
-               return -EINVAL;
-
-       dev->netdev_ops->ndo_stop(dev);
-
-       mac->hw->tx_ring.tx_ring_size = BIT(fls(ring->tx_pending) - 1);
-       mac->hw->rx_ring[0].rx_ring_size = BIT(fls(ring->rx_pending) - 1);
-
-       return dev->netdev_ops->ndo_open(dev);
-}
-
-static void mtk_get_ringparam(struct net_device *dev,
-                             struct ethtool_ringparam *ring)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       ring->rx_max_pending = mac->hw->soc->dma_ring_size;
-       ring->tx_max_pending = mac->hw->soc->dma_ring_size;
-       ring->rx_pending = mac->hw->rx_ring[0].rx_ring_size;
-       ring->tx_pending = mac->hw->tx_ring.tx_ring_size;
-}
-
-static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
-{
-       int i;
-
-       switch (stringset) {
-       case ETH_SS_STATS:
-               for (i = 0; i < MTK_HW_STATS_LEN; i++) {
-                       memcpy(data, mtk_ethtool_hw_stats[i].name,
-                              ETH_GSTRING_LEN);
-                       data += ETH_GSTRING_LEN;
-               }
-               break;
-       }
-}
-
-static int mtk_get_sset_count(struct net_device *dev, int sset)
-{
-       switch (sset) {
-       case ETH_SS_STATS:
-               return MTK_HW_STATS_LEN;
-       default:
-               return -EOPNOTSUPP;
-       }
-}
-
-static void mtk_get_ethtool_stats(struct net_device *dev,
-                                 struct ethtool_stats *stats, u64 *data)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_hw_stats *hwstats = mac->hw_stats;
-       unsigned int start;
-       int i;
-
-       if (netif_running(dev) && netif_device_present(dev)) {
-               if (spin_trylock(&hwstats->stats_lock)) {
-                       mtk_stats_update_mac(mac);
-                       spin_unlock(&hwstats->stats_lock);
-               }
-       }
-
-       do {
-               start = u64_stats_fetch_begin_irq(&hwstats->syncp);
-               for (i = 0; i < MTK_HW_STATS_LEN; i++)
-                       data[i] = ((u64 *)hwstats)[mtk_ethtool_hw_stats[i].idx];
-
-       } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
-}
-
-static struct ethtool_ops mtk_ethtool_ops = {
-       .get_link_ksettings     = mtk_get_link_ksettings,
-       .set_link_ksettings     = mtk_set_link_ksettings,
-       .get_drvinfo            = mtk_get_drvinfo,
-       .get_msglevel           = mtk_get_msglevel,
-       .set_msglevel           = mtk_set_msglevel,
-       .nway_reset             = mtk_nway_reset,
-       .get_link               = mtk_get_link,
-       .set_ringparam          = mtk_set_ringparam,
-       .get_ringparam          = mtk_get_ringparam,
-};
-
-void mtk_set_ethtool_ops(struct net_device *netdev)
-{
-       struct mtk_mac *mac = netdev_priv(netdev);
-       struct mtk_soc_data *soc = mac->hw->soc;
-
-       if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE]) {
-               mtk_ethtool_ops.get_strings = mtk_get_strings;
-               mtk_ethtool_ops.get_sset_count = mtk_get_sset_count;
-               mtk_ethtool_ops.get_ethtool_stats = mtk_get_ethtool_stats;
-       }
-
-       netdev->ethtool_ops = &mtk_ethtool_ops;
-}
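
mtk_set_ethtool_ops() above patches one shared, non-const ethtool_ops at
runtime, which is why the structure cannot be const. A more conventional
arrangement (a sketch, not the driver's actual code) keeps two const tables
and selects one per device:

static const struct ethtool_ops mtk_ethtool_ops_base = {
	.get_link_ksettings	= mtk_get_link_ksettings,
	.set_link_ksettings	= mtk_set_link_ksettings,
	.get_drvinfo		= mtk_get_drvinfo,
	.get_msglevel		= mtk_get_msglevel,
	.set_msglevel		= mtk_set_msglevel,
	.nway_reset		= mtk_nway_reset,
	.get_link		= mtk_get_link,
	.set_ringparam		= mtk_set_ringparam,
	.get_ringparam		= mtk_get_ringparam,
};

static const struct ethtool_ops mtk_ethtool_ops_stats = {
	/* same callbacks as the base table ... */
	.get_link_ksettings	= mtk_get_link_ksettings,
	.set_link_ksettings	= mtk_set_link_ksettings,
	.get_drvinfo		= mtk_get_drvinfo,
	.get_msglevel		= mtk_get_msglevel,
	.set_msglevel		= mtk_set_msglevel,
	.nway_reset		= mtk_nway_reset,
	.get_link		= mtk_get_link,
	.set_ringparam		= mtk_set_ringparam,
	.get_ringparam		= mtk_get_ringparam,
	/* ... plus the hooks that need a hardware counter base */
	.get_strings		= mtk_get_strings,
	.get_sset_count		= mtk_get_sset_count,
	.get_ethtool_stats	= mtk_get_ethtool_stats,
};

void mtk_set_ethtool_ops(struct net_device *netdev)
{
	struct mtk_mac *mac = netdev_priv(netdev);

	if (mac->hw->soc->reg_table[MTK_REG_MTK_COUNTER_BASE])
		netdev->ethtool_ops = &mtk_ethtool_ops_stats;
	else
		netdev->ethtool_ops = &mtk_ethtool_ops_base;
}
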
diff --git a/drivers/staging/mt7621-eth/ethtool.h b/drivers/staging/mt7621-eth/ethtool.h
deleted file mode 100644 (file)
index 0071469..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef MTK_ETHTOOL_H
-#define MTK_ETHTOOL_H
-
-#include <linux/ethtool.h>
-
-void mtk_set_ethtool_ops(struct net_device *netdev);
-
-#endif /* MTK_ETHTOOL_H */
diff --git a/drivers/staging/mt7621-eth/gsw_mt7620.h b/drivers/staging/mt7621-eth/gsw_mt7620.h
deleted file mode 100644 (file)
index 70f7e54..0000000
+++ /dev/null
@@ -1,277 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef _RALINK_GSW_MT7620_H__
-#define _RALINK_GSW_MT7620_H__
-
-#define GSW_REG_PHY_TIMEOUT    (5 * HZ)
-
-#define MT7620_GSW_REG_PIAC    0x0004
-
-#define GSW_NUM_VLANS          16
-#define GSW_NUM_VIDS           4096
-#define GSW_NUM_PORTS          7
-#define GSW_PORT6              6
-
-#define GSW_MDIO_ACCESS                BIT(31)
-#define GSW_MDIO_READ          BIT(19)
-#define GSW_MDIO_WRITE         BIT(18)
-#define GSW_MDIO_START         BIT(16)
-#define GSW_MDIO_ADDR_SHIFT    20
-#define GSW_MDIO_REG_SHIFT     25
-
-#define GSW_REG_PORT_PMCR(x)   (0x3000 + (x * 0x100))
-#define GSW_REG_PORT_STATUS(x) (0x3008 + (x * 0x100))
-#define GSW_REG_SMACCR0                0x3fE4
-#define GSW_REG_SMACCR1                0x3fE8
-#define GSW_REG_CKGCR          0x3ff0
-
-#define GSW_REG_IMR            0x7008
-#define GSW_REG_ISR            0x700c
-#define GSW_REG_GPC1           0x7014
-
-#define SYSC_REG_CHIP_REV_ID   0x0c
-#define SYSC_REG_CFG           0x10
-#define SYSC_REG_CFG1          0x14
-#define RST_CTRL_MCM           BIT(2)
-#define SYSC_PAD_RGMII2_MDIO   0x58
-#define SYSC_GPIO_MODE         0x60
-
-#define PORT_IRQ_ST_CHG                0x7f
-
-#define MT7621_ESW_PHY_POLLING 0x0000
-#define MT7620_ESW_PHY_POLLING 0x7000
-
-#define        PMCR_IPG                BIT(18)
-#define        PMCR_MAC_MODE           BIT(16)
-#define        PMCR_FORCE              BIT(15)
-#define        PMCR_TX_EN              BIT(14)
-#define        PMCR_RX_EN              BIT(13)
-#define        PMCR_BACKOFF            BIT(9)
-#define        PMCR_BACKPRES           BIT(8)
-#define        PMCR_RX_FC              BIT(5)
-#define        PMCR_TX_FC              BIT(4)
-#define        PMCR_SPEED(_x)          (_x << 2)
-#define        PMCR_DUPLEX             BIT(1)
-#define        PMCR_LINK               BIT(0)
-
-#define PHY_AN_EN              BIT(31)
-#define PHY_PRE_EN             BIT(30)
-#define PMY_MDC_CONF(_x)       ((_x & 0x3f) << 24)
-
-/* ethernet subsystem config register */
-#define ETHSYS_SYSCFG0         0x14
-/* ethernet subsystem clock register */
-#define ETHSYS_CLKCFG0         0x2c
-#define ETHSYS_TRGMII_CLK_SEL362_5     BIT(11)
-
-/* p5 RGMII wrapper TX clock control register */
-#define MT7530_P5RGMIITXCR     0x7b04
-/* p5 RGMII wrapper RX clock control register */
-#define MT7530_P5RGMIIRXCR     0x7b00
-/* TRGMII TDX ODT registers */
-#define MT7530_TRGMII_TD0_ODT  0x7a54
-#define MT7530_TRGMII_TD1_ODT  0x7a5c
-#define MT7530_TRGMII_TD2_ODT  0x7a64
-#define MT7530_TRGMII_TD3_ODT  0x7a6c
-#define MT7530_TRGMII_TD4_ODT  0x7a74
-#define MT7530_TRGMII_TD5_ODT  0x7a7c
-/* TRGMII TCK ctrl register */
-#define MT7530_TRGMII_TCK_CTRL 0x7a78
-/* TRGMII Tx ctrl register */
-#define MT7530_TRGMII_TXCTRL   0x7a40
-/* port 6 extended control register */
-#define MT7530_P6ECR            0x7830
-/* IO driver control register */
-#define MT7530_IO_DRV_CR       0x7810
-/* top signal control register */
-#define MT7530_TOP_SIG_CTRL    0x7808
-/* modified hwtrap register */
-#define MT7530_MHWTRAP         0x7804
-/* hwtrap status register */
-#define MT7530_HWTRAP          0x7800
-/* status interrupt register */
-#define MT7530_SYS_INT_STS     0x700c
-/* system interrupt enable register */
-#define MT7530_SYS_INT_EN      0x7008
-/* system control register */
-#define MT7530_SYS_CTRL                0x7000
-/* port MAC status register */
-#define MT7530_PMSR_P(x)       (0x3008 + (x * 0x100))
-/* port MAC control register */
-#define MT7530_PMCR_P(x)       (0x3000 + (x * 0x100))
-
-#define MT7621_XTAL_SHIFT      6
-#define MT7621_XTAL_MASK       0x7
-#define MT7621_XTAL_25         6
-#define MT7621_XTAL_40         3
-#define MT7621_MDIO_DRV_MASK   (3 << 4)
-#define MT7621_GE1_MODE_MASK   (3 << 12)
-
-#define TRGMII_TXCTRL_TXC_INV  BIT(30)
-#define P6ECR_INTF_MODE_RGMII  BIT(1)
-#define P5RGMIIRXCR_C_ALIGN    BIT(8)
-#define P5RGMIIRXCR_DELAY_2    BIT(1)
-#define P5RGMIITXCR_DELAY_2    (BIT(8) | BIT(2))
-
-/* TOP_SIG_CTRL bits */
-#define TOP_SIG_CTRL_NORMAL    (BIT(17) | BIT(16))
-
-/* MHWTRAP bits */
-#define MHWTRAP_MANUAL         BIT(16)
-#define MHWTRAP_P5_MAC_SEL     BIT(13)
-#define MHWTRAP_P6_DIS         BIT(8)
-#define MHWTRAP_P5_RGMII_MODE  BIT(7)
-#define MHWTRAP_P5_DIS         BIT(6)
-#define MHWTRAP_PHY_ACCESS     BIT(5)
-
-/* HWTRAP bits */
-#define HWTRAP_XTAL_SHIFT      9
-#define HWTRAP_XTAL_MASK       0x3
-
-/* SYS_CTRL bits */
-#define SYS_CTRL_SW_RST                BIT(1)
-#define SYS_CTRL_REG_RST       BIT(0)
-
-/* PMCR bits */
-#define PMCR_IFG_XMIT_96       BIT(18)
-#define PMCR_MAC_MODE          BIT(16)
-#define PMCR_FORCE_MODE                BIT(15)
-#define PMCR_TX_EN             BIT(14)
-#define PMCR_RX_EN             BIT(13)
-#define PMCR_BACK_PRES_EN      BIT(9)
-#define PMCR_BACKOFF_EN                BIT(8)
-#define PMCR_TX_FC_EN          BIT(5)
-#define PMCR_RX_FC_EN          BIT(4)
-#define PMCR_FORCE_SPEED_1000  BIT(3)
-#define PMCR_FORCE_FDX         BIT(1)
-#define PMCR_FORCE_LNK         BIT(0)
-#define PMCR_FIXED_LINK                (PMCR_IFG_XMIT_96 | PMCR_MAC_MODE | \
-                                PMCR_FORCE_MODE | PMCR_TX_EN | PMCR_RX_EN | \
-                                PMCR_BACK_PRES_EN | PMCR_BACKOFF_EN | \
-                                PMCR_FORCE_SPEED_1000 | PMCR_FORCE_FDX | \
-                                PMCR_FORCE_LNK)
-
-#define PMCR_FIXED_LINK_FC     (PMCR_FIXED_LINK | \
-                                PMCR_TX_FC_EN | PMCR_RX_FC_EN)
-
-/* TRGMII control registers */
-#define GSW_INTF_MODE          0x390
-#define GSW_TRGMII_TD0_ODT     0x354
-#define GSW_TRGMII_TD1_ODT     0x35c
-#define GSW_TRGMII_TD2_ODT     0x364
-#define GSW_TRGMII_TD3_ODT     0x36c
-#define GSW_TRGMII_TXCTL_ODT   0x374
-#define GSW_TRGMII_TCK_ODT     0x37c
-#define GSW_TRGMII_RCK_CTRL    0x300
-
-#define INTF_MODE_TRGMII       BIT(1)
-#define TRGMII_RCK_CTRL_RX_RST BIT(31)
-
-/* Mac control registers */
-#define MTK_MAC_P2_MCR         0x200
-#define MTK_MAC_P1_MCR         0x100
-
-#define MAC_MCR_MAX_RX_2K      BIT(29)
-#define MAC_MCR_IPG_CFG                (BIT(18) | BIT(16))
-#define MAC_MCR_FORCE_MODE     BIT(15)
-#define MAC_MCR_TX_EN          BIT(14)
-#define MAC_MCR_RX_EN          BIT(13)
-#define MAC_MCR_BACKOFF_EN     BIT(9)
-#define MAC_MCR_BACKPR_EN      BIT(8)
-#define MAC_MCR_FORCE_RX_FC    BIT(5)
-#define MAC_MCR_FORCE_TX_FC    BIT(4)
-#define MAC_MCR_SPEED_1000     BIT(3)
-#define MAC_MCR_FORCE_DPX      BIT(1)
-#define MAC_MCR_FORCE_LINK     BIT(0)
-#define MAC_MCR_FIXED_LINK     (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \
-                                MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | \
-                                MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | \
-                                MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_RX_FC | \
-                                MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \
-                                MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK)
-#define MAC_MCR_FIXED_LINK_FC  (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \
-                                MAC_MCR_FIXED_LINK)
-
-/* possible XTAL speed */
-#define        MT7623_XTAL_40          0
-#define MT7623_XTAL_20         1
-#define MT7623_XTAL_25         3
-
-/* GPIO port control registers */
-#define        GPIO_OD33_CTRL8         0x4c0
-#define        GPIO_BIAS_CTRL          0xed0
-#define GPIO_DRV_SEL10         0xf00
-
-/* on MT7620 the function of port 4 can be configured in software */
-enum {
-       PORT4_EPHY = 0,
-       PORT4_EXT,
-};
-
-/* struct mt7620_gsw - the structure that holds the SoC-specific data
- * @dev:               The Device struct
- * @base:              The base address
- * @piac_offset:       The PIAC base may change depending on SoC
- * @irq:               The IRQ we are using
- * @port4:             The port4 mode on MT7620
- * @autopoll:          Is MDIO autopolling enabled
- * @ethsys:            The ethsys register map
- * @pctl:              The pin control register map
- * @clk_gsw:           The switch clock
- * @clk_gp1:           The gmac1 clock
- * @clk_gp2:           The gmac2 clock
- * @clk_trgpll:                The trgmii pll clock
- */
-struct mt7620_gsw {
-       struct device           *dev;
-       void __iomem            *base;
-       u32                     piac_offset;
-       int                     irq;
-       int                     port4;
-       unsigned long int       autopoll;
-
-       struct regmap           *ethsys;
-       struct regmap           *pctl;
-
-       struct clk              *clk_gsw;
-       struct clk              *clk_gp1;
-       struct clk              *clk_gp2;
-       struct clk              *clk_trgpll;
-};
-
-/* switch register I/O wrappers */
-void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg);
-u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg);
-
-/* the callback used by the driver core to bring up the switch */
-int mtk_gsw_init(struct mtk_eth *eth);
-
-/* MDIO access wrappers */
-int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val);
-int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg);
-void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port);
-int mt7620_has_carrier(struct mtk_eth *eth);
-void mt7620_print_link_state(struct mtk_eth *eth, int port, int link,
-                            int speed, int duplex);
-void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val);
-u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg);
-void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg);
-
-u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr,
-                     u32 phy_register, u32 write_data);
-u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg);
-void mt7620_handle_carrier(struct mtk_eth *eth);
-
-#endif
diff --git a/drivers/staging/mt7621-eth/gsw_mt7621.c b/drivers/staging/mt7621-eth/gsw_mt7621.c
deleted file mode 100644 (file)
index 53767b1..0000000
+++ /dev/null
@@ -1,297 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-
-#include <ralink_regs.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-
-void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg)
-{
-       iowrite32(val, gsw->base + reg);
-}
-EXPORT_SYMBOL_GPL(mtk_switch_w32);
-
-u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg)
-{
-       return ioread32(gsw->base + reg);
-}
-EXPORT_SYMBOL_GPL(mtk_switch_r32);
-
-static irqreturn_t gsw_interrupt_mt7621(int irq, void *_eth)
-{
-       struct mtk_eth *eth = (struct mtk_eth *)_eth;
-       struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-       u32 reg, i;
-
-       reg = mt7530_mdio_r32(gsw, MT7530_SYS_INT_STS);
-
-       for (i = 0; i < 5; i++) {
-               unsigned int link;
-
-               if ((reg & BIT(i)) == 0)
-                       continue;
-
-               link = mt7530_mdio_r32(gsw, MT7530_PMSR_P(i)) & 0x1;
-
-               if (link == eth->link[i])
-                       continue;
-
-               eth->link[i] = link;
-               if (link)
-                       netdev_info(*eth->netdev,
-                                   "port %d link up\n", i);
-               else
-                       netdev_info(*eth->netdev,
-                                   "port %d link down\n", i);
-       }
-
-       mt7530_mdio_w32(gsw, MT7530_SYS_INT_STS, 0x1f);
-
-       return IRQ_HANDLED;
-}
-
-static void mt7621_hw_init(struct mtk_eth *eth, struct mt7620_gsw *gsw,
-                          struct device_node *np)
-{
-       u32 i;
-       u32 val;
-
-       /* hardware reset the switch */
-       mtk_reset(eth, RST_CTRL_MCM);
-       mdelay(10);
-
-       /* reduce RGMII2 PAD driving strength */
-       rt_sysc_m32(MT7621_MDIO_DRV_MASK, 0, SYSC_PAD_RGMII2_MDIO);
-
-       /* gpio mux - RGMII1=Normal mode */
-       rt_sysc_m32(BIT(14), 0, SYSC_GPIO_MODE);
-
-       /* set GMAC1 RGMII mode */
-       rt_sysc_m32(MT7621_GE1_MODE_MASK, 0, SYSC_REG_CFG1);
-
-       /* enable MDIO to control MT7530 */
-       rt_sysc_m32(3 << 12, 0, SYSC_GPIO_MODE);
-
-       /* turn off all PHYs */
-       for (i = 0; i <= 4; i++) {
-               val = _mt7620_mii_read(gsw, i, 0x0);
-               val |= BIT(11);
-               _mt7620_mii_write(gsw, i, 0x0, val);
-       }
-
-       /* reset the switch */
-       mt7530_mdio_w32(gsw, MT7530_SYS_CTRL,
-                       SYS_CTRL_SW_RST | SYS_CTRL_REG_RST);
-       usleep_range(10, 20);
-
-       if ((rt_sysc_r32(SYSC_REG_CHIP_REV_ID) & 0xFFFF) == 0x0101) {
-               /* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */
-               mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK, MTK_MAC_P2_MCR);
-               mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK);
-       } else {
-               /* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */
-               mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK_FC, MTK_MAC_P1_MCR);
-               mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK_FC);
-       }
-
-       /* GE2, Link down */
-       mtk_switch_w32(gsw, MAC_MCR_FORCE_MODE, MTK_MAC_P2_MCR);
-
-       /* Enable Port 6, P5 as GMAC5, P5 disable */
-       val = mt7530_mdio_r32(gsw, MT7530_MHWTRAP);
-       /* Enable Port 6 */
-       val &= ~MHWTRAP_P6_DIS;
-       /* Disable Port 5 */
-       val |= MHWTRAP_P5_DIS;
-       /* manual override of HW-Trap */
-       val |= MHWTRAP_MANUAL;
-       mt7530_mdio_w32(gsw, MT7530_MHWTRAP, val);
-
-       val = rt_sysc_r32(SYSC_REG_CFG);
-       val = (val >> MT7621_XTAL_SHIFT) & MT7621_XTAL_MASK;
-       if (val < MT7621_XTAL_25 && val >= MT7621_XTAL_40) {
-               /* 40 MHz */
-
-               /* disable MT7530 core clock */
-               _mt7620_mii_write(gsw, 0, 13, 0x1f);
-               _mt7620_mii_write(gsw, 0, 14, 0x410);
-               _mt7620_mii_write(gsw, 0, 13, 0x401f);
-               _mt7620_mii_write(gsw, 0, 14, 0x0);
-
-               /* disable MT7530 PLL */
-               _mt7620_mii_write(gsw, 0, 13, 0x1f);
-               _mt7620_mii_write(gsw, 0, 14, 0x40d);
-               _mt7620_mii_write(gsw, 0, 13, 0x401f);
-               _mt7620_mii_write(gsw, 0, 14, 0x2020);
-
-               /* for MT7530 core clock = 500 MHz */
-               _mt7620_mii_write(gsw, 0, 13, 0x1f);
-               _mt7620_mii_write(gsw, 0, 14, 0x40e);
-               _mt7620_mii_write(gsw, 0, 13, 0x401f);
-               _mt7620_mii_write(gsw, 0, 14, 0x119);
-
-               /* enable MT7530 PLL */
-               _mt7620_mii_write(gsw, 0, 13, 0x1f);
-               _mt7620_mii_write(gsw, 0, 14, 0x40d);
-               _mt7620_mii_write(gsw, 0, 13, 0x401f);
-               _mt7620_mii_write(gsw, 0, 14, 0x2820);
-
-               usleep_range(20, 40);
-
-               /* enable MT7530 core clock */
-               _mt7620_mii_write(gsw, 0, 13, 0x1f);
-               _mt7620_mii_write(gsw, 0, 14, 0x410);
-               _mt7620_mii_write(gsw, 0, 13, 0x401f);
-       }
-
-       /* RGMII */
-       _mt7620_mii_write(gsw, 0, 14, 0x1);
-
-       /* set MT7530 central align */
-       mt7530_mdio_m32(gsw, BIT(0), P6ECR_INTF_MODE_RGMII, MT7530_P6ECR);
-       mt7530_mdio_m32(gsw, TRGMII_TXCTRL_TXC_INV, 0,
-                       MT7530_TRGMII_TXCTRL);
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TCK_CTRL, 0x855);
-
-       /* delay setting for 10/1000M */
-       mt7530_mdio_w32(gsw, MT7530_P5RGMIIRXCR,
-                       P5RGMIIRXCR_C_ALIGN | P5RGMIIRXCR_DELAY_2);
-       mt7530_mdio_w32(gsw, MT7530_P5RGMIITXCR, 0x14);
-
-       /* lower Tx driving */
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TD0_ODT, 0x44);
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TD1_ODT, 0x44);
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TD2_ODT, 0x44);
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TD3_ODT, 0x44);
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TD4_ODT, 0x44);
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TD5_ODT, 0x44);
-
-       /* turn on all PHYs */
-       for (i = 0; i <= 4; i++) {
-               val = _mt7620_mii_read(gsw, i, 0);
-               val &= ~BIT(11);
-               _mt7620_mii_write(gsw, i, 0, val);
-       }
-
-#define MT7530_NUM_PORTS 8
-#define REG_ESW_PORT_PCR(x)    (0x2004 | ((x) << 8))
-#define REG_ESW_PORT_PVC(x)    (0x2010 | ((x) << 8))
-#define REG_ESW_PORT_PPBV1(x)  (0x2014 | ((x) << 8))
-#define MT7530_CPU_PORT                6
-
-       /* This is copied from mt7530_apply_config() in the libreCMC driver */
-       {
-               int i;
-
-               for (i = 0; i < MT7530_NUM_PORTS; i++)
-                       mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(i), 0x00400000);
-
-               mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(MT7530_CPU_PORT),
-                               0x00ff0000);
-
-               for (i = 0; i < MT7530_NUM_PORTS; i++)
-                       mt7530_mdio_w32(gsw, REG_ESW_PORT_PVC(i), 0x810000c0);
-       }
-
-       /* enable irq */
-       mt7530_mdio_m32(gsw, 0, 3 << 16, MT7530_TOP_SIG_CTRL);
-       mt7530_mdio_w32(gsw, MT7530_SYS_INT_EN, 0x1f);
-}
-
-static const struct of_device_id mediatek_gsw_match[] = {
-       { .compatible = "mediatek,mt7621-gsw" },
-       {},
-};
-MODULE_DEVICE_TABLE(of, mediatek_gsw_match);
-
-int mtk_gsw_init(struct mtk_eth *eth)
-{
-       struct device_node *np = eth->switch_np;
-       struct platform_device *pdev = of_find_device_by_node(np);
-       struct mt7620_gsw *gsw;
-
-       if (!pdev)
-               return -ENODEV;
-
-       if (!of_device_is_compatible(np, mediatek_gsw_match->compatible))
-               return -EINVAL;
-
-       gsw = platform_get_drvdata(pdev);
-       eth->sw_priv = gsw;
-
-       if (!gsw->irq)
-               return -EINVAL;
-
-       request_irq(gsw->irq, gsw_interrupt_mt7621, 0,
-                   "gsw", eth);
-       disable_irq(gsw->irq);
-
-       mt7621_hw_init(eth, gsw, np);
-
-       enable_irq(gsw->irq);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mtk_gsw_init);
-
-static int mt7621_gsw_probe(struct platform_device *pdev)
-{
-       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       struct mt7620_gsw *gsw;
-
-       gsw = devm_kzalloc(&pdev->dev, sizeof(struct mt7620_gsw), GFP_KERNEL);
-       if (!gsw)
-               return -ENOMEM;
-
-       gsw->base = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(gsw->base))
-               return PTR_ERR(gsw->base);
-
-       gsw->dev = &pdev->dev;
-       gsw->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
-
-       platform_set_drvdata(pdev, gsw);
-
-       return 0;
-}
-
-static int mt7621_gsw_remove(struct platform_device *pdev)
-{
-       platform_set_drvdata(pdev, NULL);
-
-       return 0;
-}
-
-static struct platform_driver gsw_driver = {
-       .probe = mt7621_gsw_probe,
-       .remove = mt7621_gsw_remove,
-       .driver = {
-               .name = "mt7621-gsw",
-               .of_match_table = mediatek_gsw_match,
-       },
-};
-
-module_platform_driver(gsw_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
-MODULE_DESCRIPTION("GBit switch driver for Mediatek MT7621 SoC");
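
The repeated writes to MII registers 13 and 14 in mt7621_hw_init() above are
IEEE 802.3 Clause 45 MMD accesses tunnelled through Clause 22: register 13
(MMD access control) selects the MMD device and the access mode, and register
14 (MMD address/data) carries first the register number and then the payload.
A helper capturing the pattern might look like this (the helper itself is
illustrative, not part of the driver):

/* write @val to MMD device @devad, register @reg, of the PHY at @port */
static void mmd_write(struct mt7620_gsw *gsw, u32 port, u32 devad,
		      u32 reg, u32 val)
{
	_mt7620_mii_write(gsw, port, 13, devad);		/* address mode */
	_mt7620_mii_write(gsw, port, 14, reg);			/* register number */
	_mt7620_mii_write(gsw, port, 13, 0x4000 | devad);	/* data mode */
	_mt7620_mii_write(gsw, port, 14, val);			/* payload */
}

With it, the "disable MT7530 PLL" sequence above becomes
mmd_write(gsw, 0, 0x1f, 0x40d, 0x2020).
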
diff --git a/drivers/staging/mt7621-eth/mdio.c b/drivers/staging/mt7621-eth/mdio.c
deleted file mode 100644 (file)
index 5fea6a4..0000000
+++ /dev/null
@@ -1,275 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/phy.h>
-#include <linux/of_net.h>
-#include <linux/of_mdio.h>
-
-#include "mtk_eth_soc.h"
-#include "mdio.h"
-
-static int mtk_mdio_reset(struct mii_bus *bus)
-{
-       /* TODO */
-       return 0;
-}
-
-static void mtk_phy_link_adjust(struct net_device *dev)
-{
-       struct mtk_eth *eth = netdev_priv(dev);
-       unsigned long flags;
-       int i;
-
-       spin_lock_irqsave(&eth->phy->lock, flags);
-       for (i = 0; i < 8; i++) {
-               if (eth->phy->phy_node[i]) {
-                       struct phy_device *phydev = eth->phy->phy[i];
-                       int status_change = 0;
-
-                       if (phydev->link)
-                               if (eth->phy->duplex[i] != phydev->duplex ||
-                                   eth->phy->speed[i] != phydev->speed)
-                                       status_change = 1;
-
-                       if (phydev->link != eth->link[i])
-                               status_change = 1;
-
-                       switch (phydev->speed) {
-                       case SPEED_1000:
-                       case SPEED_100:
-                       case SPEED_10:
-                               eth->link[i] = phydev->link;
-                               eth->phy->duplex[i] = phydev->duplex;
-                               eth->phy->speed[i] = phydev->speed;
-
-                               if (status_change &&
-                                   eth->soc->mdio_adjust_link)
-                                       eth->soc->mdio_adjust_link(eth, i);
-                               break;
-                       }
-               }
-       }
-       spin_unlock_irqrestore(&eth->phy->lock, flags);
-}
-
-int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
-                        struct device_node *phy_node)
-{
-       const __be32 *_port = NULL;
-       struct phy_device *phydev;
-       int phy_mode, port;
-
-       _port = of_get_property(phy_node, "reg", NULL);
-
-       if (!_port || (be32_to_cpu(*_port) >= 0x20)) {
-               pr_err("%pOFn: invalid port id\n", phy_node);
-               return -EINVAL;
-       }
-       port = be32_to_cpu(*_port);
-       phy_mode = of_get_phy_mode(phy_node);
-       if (phy_mode < 0) {
-               dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
-               eth->phy->phy_node[port] = NULL;
-               return -EINVAL;
-       }
-
-       phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
-                               mtk_phy_link_adjust, 0, phy_mode);
-       if (!phydev) {
-               dev_err(eth->dev, "could not connect to PHY\n");
-               eth->phy->phy_node[port] = NULL;
-               return -ENODEV;
-       }
-
-       phydev->supported &= PHY_1000BT_FEATURES;
-       phydev->advertising = phydev->supported;
-
-       dev_info(eth->dev,
-                "connected port %d to PHY at %s [uid=%08x, driver=%s]\n",
-                port, phydev_name(phydev), phydev->phy_id,
-                phydev->drv->name);
-
-       eth->phy->phy[port] = phydev;
-       eth->link[port] = 0;
-
-       return 0;
-}
-
-static void phy_init(struct mtk_eth *eth, struct mtk_mac *mac,
-                    struct phy_device *phy)
-{
-       phy_attach(eth->netdev[mac->id], phydev_name(phy),
-                  PHY_INTERFACE_MODE_MII);
-
-       phy->autoneg = AUTONEG_ENABLE;
-       phy->speed = 0;
-       phy->duplex = 0;
-       phy_set_max_speed(phy, SPEED_100);
-       phy->advertising = phy->supported | ADVERTISED_Autoneg;
-
-       phy_start_aneg(phy);
-}
-
-static int mtk_phy_connect(struct mtk_mac *mac)
-{
-       struct mtk_eth *eth = mac->hw;
-       int i;
-
-       for (i = 0; i < 8; i++) {
-               if (eth->phy->phy_node[i]) {
-                       if (!mac->phy_dev) {
-                               mac->phy_dev = eth->phy->phy[i];
-                               mac->phy_flags = MTK_PHY_FLAG_PORT;
-                       }
-               } else if (eth->mii_bus) {
-                       struct phy_device *phy;
-
-                       phy = mdiobus_get_phy(eth->mii_bus, i);
-                       if (phy) {
-                               phy_init(eth, mac, phy);
-                               if (!mac->phy_dev) {
-                                       mac->phy_dev = phy;
-                                       mac->phy_flags = MTK_PHY_FLAG_ATTACH;
-                               }
-                       }
-               }
-       }
-
-       return 0;
-}
-
-static void mtk_phy_disconnect(struct mtk_mac *mac)
-{
-       struct mtk_eth *eth = mac->hw;
-       unsigned long flags;
-       int i;
-
-       for (i = 0; i < 8; i++)
-               if (eth->phy->phy_fixed[i]) {
-                       spin_lock_irqsave(&eth->phy->lock, flags);
-                       eth->link[i] = 0;
-                       if (eth->soc->mdio_adjust_link)
-                               eth->soc->mdio_adjust_link(eth, i);
-                       spin_unlock_irqrestore(&eth->phy->lock, flags);
-               } else if (eth->phy->phy[i]) {
-                       phy_disconnect(eth->phy->phy[i]);
-               } else if (eth->mii_bus) {
-                       struct phy_device *phy =
-                               mdiobus_get_phy(eth->mii_bus, i);
-
-                       if (phy)
-                               phy_detach(phy);
-               }
-}
-
-static void mtk_phy_start(struct mtk_mac *mac)
-{
-       struct mtk_eth *eth = mac->hw;
-       unsigned long flags;
-       int i;
-
-       for (i = 0; i < 8; i++) {
-               if (eth->phy->phy_fixed[i]) {
-                       spin_lock_irqsave(&eth->phy->lock, flags);
-                       eth->link[i] = 1;
-                       if (eth->soc->mdio_adjust_link)
-                               eth->soc->mdio_adjust_link(eth, i);
-                       spin_unlock_irqrestore(&eth->phy->lock, flags);
-               } else if (eth->phy->phy[i]) {
-                       phy_start(eth->phy->phy[i]);
-               }
-       }
-}
-
-static void mtk_phy_stop(struct mtk_mac *mac)
-{
-       struct mtk_eth *eth = mac->hw;
-       unsigned long flags;
-       int i;
-
-       for (i = 0; i < 8; i++)
-               if (eth->phy->phy_fixed[i]) {
-                       spin_lock_irqsave(&eth->phy->lock, flags);
-                       eth->link[i] = 0;
-                       if (eth->soc->mdio_adjust_link)
-                               eth->soc->mdio_adjust_link(eth, i);
-                       spin_unlock_irqrestore(&eth->phy->lock, flags);
-               } else if (eth->phy->phy[i]) {
-                       phy_stop(eth->phy->phy[i]);
-               }
-}
-
-static struct mtk_phy phy_ralink = {
-       .connect = mtk_phy_connect,
-       .disconnect = mtk_phy_disconnect,
-       .start = mtk_phy_start,
-       .stop = mtk_phy_stop,
-};
-
-int mtk_mdio_init(struct mtk_eth *eth)
-{
-       struct device_node *mii_np;
-       int err;
-
-       if (!eth->soc->mdio_read || !eth->soc->mdio_write)
-               return 0;
-
-       spin_lock_init(&phy_ralink.lock);
-       eth->phy = &phy_ralink;
-
-       mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
-       if (!mii_np) {
-               dev_err(eth->dev, "no mdio-bus child node found\n");
-               return -ENODEV;
-       }
-
-       if (!of_device_is_available(mii_np)) {
-               err = 0;
-               goto err_put_node;
-       }
-
-       eth->mii_bus = mdiobus_alloc();
-       if (!eth->mii_bus) {
-               err = -ENOMEM;
-               goto err_put_node;
-       }
-
-       eth->mii_bus->name = "mdio";
-       eth->mii_bus->read = eth->soc->mdio_read;
-       eth->mii_bus->write = eth->soc->mdio_write;
-       eth->mii_bus->reset = mtk_mdio_reset;
-       eth->mii_bus->priv = eth;
-       eth->mii_bus->parent = eth->dev;
-
-       snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
-       err = of_mdiobus_register(eth->mii_bus, mii_np);
-       if (err)
-               goto err_free_bus;
-
-       return 0;
-
-err_free_bus:
-       mdiobus_free(eth->mii_bus);
-err_put_node:
-       of_node_put(mii_np);
-       eth->mii_bus = NULL;
-       return err;
-}
-
-void mtk_mdio_cleanup(struct mtk_eth *eth)
-{
-       if (!eth->mii_bus)
-               return;
-
-       mdiobus_unregister(eth->mii_bus);
-       of_node_put(eth->mii_bus->dev.of_node);
-       mdiobus_free(eth->mii_bus);
-}
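
Note that mtk_mdio_init() above points every ethernet instance at the same
static phy_ralink object, so two probed devices would share PHY state. A
per-instance allocation avoids that; the sketch below assumes struct mtk_phy
needs no setup beyond its callbacks and lock:

	struct mtk_phy *phy;

	phy = devm_kzalloc(eth->dev, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	phy->connect = mtk_phy_connect;
	phy->disconnect = mtk_phy_disconnect;
	phy->start = mtk_phy_start;
	phy->stop = mtk_phy_stop;
	spin_lock_init(&phy->lock);
	eth->phy = phy;
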
diff --git a/drivers/staging/mt7621-eth/mdio.h b/drivers/staging/mt7621-eth/mdio.h
deleted file mode 100644 (file)
index b14e238..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef _RALINK_MDIO_H__
-#define _RALINK_MDIO_H__
-
-#ifdef CONFIG_NET_MEDIATEK_MDIO
-int mtk_mdio_init(struct mtk_eth *eth);
-void mtk_mdio_cleanup(struct mtk_eth *eth);
-int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
-                        struct device_node *phy_node);
-#else
-static inline int mtk_mdio_init(struct mtk_eth *eth) { return 0; }
-static inline void mtk_mdio_cleanup(struct mtk_eth *eth) {}
-#endif
-#endif
diff --git a/drivers/staging/mt7621-eth/mdio_mt7620.c b/drivers/staging/mt7621-eth/mdio_mt7620.c
deleted file mode 100644 (file)
index ced605c..0000000
+++ /dev/null
@@ -1,173 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-#include "mdio.h"
-
-static int mt7620_mii_busy_wait(struct mt7620_gsw *gsw)
-{
-       unsigned long t_start = jiffies;
-
-       while (1) {
-               if (!(mtk_switch_r32(gsw,
-                                    gsw->piac_offset + MT7620_GSW_REG_PIAC) &
-                                    GSW_MDIO_ACCESS))
-                       return 0;
-               if (time_after(jiffies, t_start + GSW_REG_PHY_TIMEOUT))
-                       break;
-       }
-
-       dev_err(gsw->dev, "mdio: MDIO timeout\n");
-       return -1;
-}
-
-u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr,
-                     u32 phy_register, u32 write_data)
-{
-       if (mt7620_mii_busy_wait(gsw))
-               return -1;
-
-       write_data &= 0xffff;
-
-       mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_WRITE |
-               (phy_register << GSW_MDIO_REG_SHIFT) |
-               (phy_addr << GSW_MDIO_ADDR_SHIFT) | write_data,
-               MT7620_GSW_REG_PIAC);
-
-       if (mt7620_mii_busy_wait(gsw))
-               return -1;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(_mt7620_mii_write);
-
-u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg)
-{
-       u32 d;
-
-       if (mt7620_mii_busy_wait(gsw))
-               return 0xffff;
-
-       mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_READ |
-               (phy_reg << GSW_MDIO_REG_SHIFT) |
-               (phy_addr << GSW_MDIO_ADDR_SHIFT),
-               MT7620_GSW_REG_PIAC);
-
-       if (mt7620_mii_busy_wait(gsw))
-               return 0xffff;
-
-       d = mtk_switch_r32(gsw, MT7620_GSW_REG_PIAC) & 0xffff;
-
-       return d;
-}
-EXPORT_SYMBOL_GPL(_mt7620_mii_read);
-
-int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val)
-{
-       struct mtk_eth *eth = bus->priv;
-       struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-
-       return _mt7620_mii_write(gsw, phy_addr, phy_reg, val);
-}
-
-int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
-{
-       struct mtk_eth *eth = bus->priv;
-       struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-
-       return _mt7620_mii_read(gsw, phy_addr, phy_reg);
-}
-
-void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val)
-{
-       _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
-       _mt7620_mii_write(gsw, 0x1f, (reg >> 2) & 0xf,  val & 0xffff);
-       _mt7620_mii_write(gsw, 0x1f, 0x10, val >> 16);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_w32);
-
-u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg)
-{
-       u16 high, low;
-
-       _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
-       low = _mt7620_mii_read(gsw, 0x1f, (reg >> 2) & 0xf);
-       high = _mt7620_mii_read(gsw, 0x1f, 0x10);
-
-       return (high << 16) | (low & 0xffff);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_r32);
-
-void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg)
-{
-       u32 val = mt7530_mdio_r32(gsw, reg);
-
-       val &= ~mask;
-       val |= set;
-       mt7530_mdio_w32(gsw, reg, val);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_m32);
-
-static const char *mtk_speed_str(int speed)
-{
-       switch (speed) {
-       case 2:
-       case SPEED_1000:
-               return "1000";
-       case 1:
-       case SPEED_100:
-               return "100";
-       case 0:
-       case SPEED_10:
-               return "10";
-       }
-
-       return "? ";
-}
-
-int mt7620_has_carrier(struct mtk_eth *eth)
-{
-       struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-       int i;
-
-       for (i = 0; i < GSW_PORT6; i++)
-               if (mt7530_mdio_r32(gsw, GSW_REG_PORT_STATUS(i)) & 0x1)
-                       return 1;
-       return 0;
-}
-
-void mt7620_print_link_state(struct mtk_eth *eth, int port, int link,
-                            int speed, int duplex)
-{
-       struct mt7620_gsw *gsw = eth->sw_priv;
-
-       if (link)
-               dev_info(gsw->dev, "port %d link up (%sMbps/%s duplex)\n",
-                        port, mtk_speed_str(speed),
-                        (duplex) ? "Full" : "Half");
-       else
-               dev_info(gsw->dev, "port %d link down\n", port);
-}
-
-void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port)
-{
-       mt7620_print_link_state(eth, port, eth->link[port],
-                               eth->phy->speed[port],
-                               (eth->phy->duplex[port] == DUPLEX_FULL));
-}
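
mt7530_mdio_w32()/mt7530_mdio_r32() above split one 32-bit MT7530 register
access into three Clause 22 operations on pseudo-PHY address 0x1f: a page
select carrying register address bits 15..6, the low half-word at MII register
(reg >> 2) & 0xf, and the high half-word at MII register 0x10. A worked
decomposition for MT7530_MHWTRAP (0x7804), as a sketch:

	u32 reg  = 0x7804;		/* MT7530_MHWTRAP */
	u32 page = (reg >> 6) & 0x3ff;	/* 0x1e0: written to PHY 0x1f, reg 0x1f */
	u32 lo   = (reg >> 2) & 0xf;	/* 0x1: MII register holding bits 15..0 */
					/* bits 31..16 go through MII reg 0x10 */
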
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c
deleted file mode 100644 (file)
index 6027b19..0000000
+++ /dev/null
@@ -1,2176 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/mfd/syscon.h>
-#include <linux/clk.h>
-#include <linux/of_net.h>
-#include <linux/of_mdio.h>
-#include <linux/if_vlan.h>
-#include <linux/reset.h>
-#include <linux/tcp.h>
-#include <linux/io.h>
-#include <linux/bug.h>
-#include <linux/regmap.h>
-
-#include "mtk_eth_soc.h"
-#include "mdio.h"
-#include "ethtool.h"
-
-#define        MAX_RX_LENGTH           1536
-#define MTK_RX_ETH_HLEN                (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
-#define MTK_RX_HLEN            (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
-#define DMA_DUMMY_DESC         0xffffffff
-#define MTK_DEFAULT_MSG_ENABLE \
-               (NETIF_MSG_DRV | \
-               NETIF_MSG_PROBE | \
-               NETIF_MSG_LINK | \
-               NETIF_MSG_TIMER | \
-               NETIF_MSG_IFDOWN | \
-               NETIF_MSG_IFUP | \
-               NETIF_MSG_RX_ERR | \
-               NETIF_MSG_TX_ERR)
-
-#define TX_DMA_DESP2_DEF       (TX_DMA_LS0 | TX_DMA_DONE)
-#define NEXT_TX_DESP_IDX(X)    (((X) + 1) & (ring->tx_ring_size - 1))
-#define NEXT_RX_DESP_IDX(X)    (((X) + 1) & (ring->rx_ring_size - 1))
-
-#define SYSC_REG_RSTCTRL       0x34
-
-static int mtk_msg_level = -1;
-module_param_named(msg_level, mtk_msg_level, int, 0);
-MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
-
-static const u16 mtk_reg_table_default[MTK_REG_COUNT] = {
-       [MTK_REG_PDMA_GLO_CFG] = MTK_PDMA_GLO_CFG,
-       [MTK_REG_PDMA_RST_CFG] = MTK_PDMA_RST_CFG,
-       [MTK_REG_DLY_INT_CFG] = MTK_DLY_INT_CFG,
-       [MTK_REG_TX_BASE_PTR0] = MTK_TX_BASE_PTR0,
-       [MTK_REG_TX_MAX_CNT0] = MTK_TX_MAX_CNT0,
-       [MTK_REG_TX_CTX_IDX0] = MTK_TX_CTX_IDX0,
-       [MTK_REG_TX_DTX_IDX0] = MTK_TX_DTX_IDX0,
-       [MTK_REG_RX_BASE_PTR0] = MTK_RX_BASE_PTR0,
-       [MTK_REG_RX_MAX_CNT0] = MTK_RX_MAX_CNT0,
-       [MTK_REG_RX_CALC_IDX0] = MTK_RX_CALC_IDX0,
-       [MTK_REG_RX_DRX_IDX0] = MTK_RX_DRX_IDX0,
-       [MTK_REG_MTK_INT_ENABLE] = MTK_INT_ENABLE,
-       [MTK_REG_MTK_INT_STATUS] = MTK_INT_STATUS,
-       [MTK_REG_MTK_DMA_VID_BASE] = MTK_DMA_VID0,
-       [MTK_REG_MTK_COUNTER_BASE] = MTK_GDMA1_TX_GBCNT,
-       [MTK_REG_MTK_RST_GL] = MTK_RST_GL,
-};
-
-static const u16 *mtk_reg_table = mtk_reg_table_default;
-
-void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg)
-{
-       __raw_writel(val, eth->base + reg);
-}
-
-u32 mtk_r32(struct mtk_eth *eth, unsigned int reg)
-{
-       return __raw_readl(eth->base + reg);
-}
-
-static void mtk_reg_w32(struct mtk_eth *eth, u32 val, enum mtk_reg reg)
-{
-       mtk_w32(eth, val, mtk_reg_table[reg]);
-}
-
-static u32 mtk_reg_r32(struct mtk_eth *eth, enum mtk_reg reg)
-{
-       return mtk_r32(eth, mtk_reg_table[reg]);
-}
-
-/* These bits are also exposed via the reset-controller API. However, the
- * switch and FE need to be brought out of reset at exactly the same moment,
- * and the reset-controller API does not provide this feature yet. Do the
- * reset manually until the reset-controller API supports it.
- */
-void mtk_reset(struct mtk_eth *eth, u32 reset_bits)
-{
-       u32 val;
-
-       regmap_read(eth->ethsys, SYSC_REG_RSTCTRL, &val);
-       val |= reset_bits;
-       regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
-       usleep_range(10, 20);
-       val &= ~reset_bits;
-       regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
-       usleep_range(10, 20);
-}
-EXPORT_SYMBOL(mtk_reset);
-
-static inline void mtk_irq_ack(struct mtk_eth *eth, u32 mask)
-{
-       if (eth->soc->dma_type & MTK_PDMA)
-               mtk_reg_w32(eth, mask, MTK_REG_MTK_INT_STATUS);
-       if (eth->soc->dma_type & MTK_QDMA)
-               mtk_w32(eth, mask, MTK_QMTK_INT_STATUS);
-}
-
-static inline u32 mtk_irq_pending(struct mtk_eth *eth)
-{
-       u32 status = 0;
-
-       if (eth->soc->dma_type & MTK_PDMA)
-               status |= mtk_reg_r32(eth, MTK_REG_MTK_INT_STATUS);
-       if (eth->soc->dma_type & MTK_QDMA)
-               status |= mtk_r32(eth, MTK_QMTK_INT_STATUS);
-
-       return status;
-}
-
-static void mtk_irq_ack_status(struct mtk_eth *eth, u32 mask)
-{
-       u32 status_reg = MTK_REG_MTK_INT_STATUS;
-
-       if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
-               status_reg = MTK_REG_MTK_INT_STATUS2;
-
-       mtk_reg_w32(eth, mask, status_reg);
-}
-
-static u32 mtk_irq_pending_status(struct mtk_eth *eth)
-{
-       u32 status_reg = MTK_REG_MTK_INT_STATUS;
-
-       if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
-               status_reg = MTK_REG_MTK_INT_STATUS2;
-
-       return mtk_reg_r32(eth, status_reg);
-}
-
-static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
-{
-       u32 val;
-
-       if (eth->soc->dma_type & MTK_PDMA) {
-               val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
-               mtk_reg_w32(eth, val & ~mask, MTK_REG_MTK_INT_ENABLE);
-               /* flush write */
-               mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
-       }
-       if (eth->soc->dma_type & MTK_QDMA) {
-               val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-               mtk_w32(eth, val & ~mask, MTK_QMTK_INT_ENABLE);
-               /* flush write */
-               mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-       }
-}
-
-static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
-{
-       u32 val;
-
-       if (eth->soc->dma_type & MTK_PDMA) {
-               val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
-               mtk_reg_w32(eth, val | mask, MTK_REG_MTK_INT_ENABLE);
-               /* flush write */
-               mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
-       }
-       if (eth->soc->dma_type & MTK_QDMA) {
-               val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-               mtk_w32(eth, val | mask, MTK_QMTK_INT_ENABLE);
-               /* flush write */
-               mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-       }
-}
-
-static inline u32 mtk_irq_enabled(struct mtk_eth *eth)
-{
-       u32 enabled = 0;
-
-       if (eth->soc->dma_type & MTK_PDMA)
-               enabled |= mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
-       if (eth->soc->dma_type & MTK_QDMA)
-               enabled |= mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-
-       return enabled;
-}
-
-static inline void mtk_hw_set_macaddr(struct mtk_mac *mac,
-                                     unsigned char *macaddr)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&mac->hw->page_lock, flags);
-       mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], MTK_GDMA1_MAC_ADRH);
-       mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
-               (macaddr[4] << 8) | macaddr[5],
-               MTK_GDMA1_MAC_ADRL);
-       spin_unlock_irqrestore(&mac->hw->page_lock, flags);
-}
-
-static int mtk_set_mac_address(struct net_device *dev, void *p)
-{
-       int ret = eth_mac_addr(dev, p);
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-
-       if (ret)
-               return ret;
-
-       if (eth->soc->set_mac)
-               eth->soc->set_mac(mac, dev->dev_addr);
-       else
-               mtk_hw_set_macaddr(mac, p);
-
-       return 0;
-}
-
-static inline int mtk_max_frag_size(int mtu)
-{
-       /* make sure buf_size will be at least MAX_RX_LENGTH */
-       if (mtu + MTK_RX_ETH_HLEN < MAX_RX_LENGTH)
-               mtu = MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
-
-       return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
-               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-}
-
-static inline int mtk_max_buf_size(int frag_size)
-{
-       int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
-                      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
-       WARN_ON(buf_size < MAX_RX_LENGTH);
-
-       return buf_size;
-}
-
-static inline void mtk_get_rxd(struct mtk_rx_dma *rxd,
-                              struct mtk_rx_dma *dma_rxd)
-{
-       rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
-       rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
-       rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
-       rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
-}
-
-static inline void mtk_set_txd_pdma(struct mtk_tx_dma *txd,
-                                   struct mtk_tx_dma *dma_txd)
-{
-       WRITE_ONCE(dma_txd->txd1, txd->txd1);
-       WRITE_ONCE(dma_txd->txd3, txd->txd3);
-       WRITE_ONCE(dma_txd->txd4, txd->txd4);
-       /* clean dma done flag last */
-       WRITE_ONCE(dma_txd->txd2, txd->txd2);
-}
-
-static void mtk_clean_rx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
-{
-       int i;
-
-       if (ring->rx_data && ring->rx_dma) {
-               for (i = 0; i < ring->rx_ring_size; i++) {
-                       if (!ring->rx_data[i])
-                               continue;
-                       if (!ring->rx_dma[i].rxd1)
-                               continue;
-                       dma_unmap_single(eth->dev,
-                                        ring->rx_dma[i].rxd1,
-                                        ring->rx_buf_size,
-                                        DMA_FROM_DEVICE);
-                       skb_free_frag(ring->rx_data[i]);
-               }
-               kfree(ring->rx_data);
-               ring->rx_data = NULL;
-       }
-
-       if (ring->rx_dma) {
-               dma_free_coherent(eth->dev,
-                                 ring->rx_ring_size * sizeof(*ring->rx_dma),
-                                 ring->rx_dma,
-                                 ring->rx_phys);
-               ring->rx_dma = NULL;
-       }
-}
-
-static int mtk_dma_rx_alloc(struct mtk_eth *eth, struct mtk_rx_ring *ring)
-{
-       int i, pad = 0;
-
-       ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
-       ring->rx_buf_size = mtk_max_buf_size(ring->frag_size);
-       ring->rx_ring_size = eth->soc->dma_ring_size;
-       ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data),
-                               GFP_KERNEL);
-       if (!ring->rx_data)
-               goto no_rx_mem;
-
-       for (i = 0; i < ring->rx_ring_size; i++) {
-               ring->rx_data[i] = netdev_alloc_frag(ring->frag_size);
-               if (!ring->rx_data[i])
-                       goto no_rx_mem;
-       }
-
-       ring->rx_dma =
-               dma_alloc_coherent(eth->dev,
-                                  ring->rx_ring_size * sizeof(*ring->rx_dma),
-                                  &ring->rx_phys, GFP_ATOMIC | __GFP_ZERO);
-       if (!ring->rx_dma)
-               goto no_rx_mem;
-
-       if (!eth->soc->rx_2b_offset)
-               pad = NET_IP_ALIGN;
-
-       for (i = 0; i < ring->rx_ring_size; i++) {
-               dma_addr_t dma_addr = dma_map_single(eth->dev,
-                               ring->rx_data[i] + NET_SKB_PAD + pad,
-                               ring->rx_buf_size,
-                               DMA_FROM_DEVICE);
-               if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
-                       goto no_rx_mem;
-               ring->rx_dma[i].rxd1 = (unsigned int)dma_addr;
-
-               if (eth->soc->rx_sg_dma)
-                       ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
-               else
-                       ring->rx_dma[i].rxd2 = RX_DMA_LSO;
-       }
-       ring->rx_calc_idx = ring->rx_ring_size - 1;
-       /* make sure that all changes to the dma ring are flushed before we
-        * continue
-        */
-       wmb();
-
-       return 0;
-
-no_rx_mem:
-       return -ENOMEM;
-}
-
-static void mtk_txd_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
-{
-       if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-               dma_unmap_single(dev,
-                                dma_unmap_addr(tx_buf, dma_addr0),
-                                dma_unmap_len(tx_buf, dma_len0),
-                                DMA_TO_DEVICE);
-       } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-               dma_unmap_page(dev,
-                              dma_unmap_addr(tx_buf, dma_addr0),
-                              dma_unmap_len(tx_buf, dma_len0),
-                              DMA_TO_DEVICE);
-       }
-       if (tx_buf->flags & MTK_TX_FLAGS_PAGE1)
-               dma_unmap_page(dev,
-                              dma_unmap_addr(tx_buf, dma_addr1),
-                              dma_unmap_len(tx_buf, dma_len1),
-                              DMA_TO_DEVICE);
-
-       tx_buf->flags = 0;
-       if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC))
-               dev_kfree_skb_any(tx_buf->skb);
-       tx_buf->skb = NULL;
-}
-
-static void mtk_pdma_tx_clean(struct mtk_eth *eth)
-{
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       int i;
-
-       if (ring->tx_buf) {
-               for (i = 0; i < ring->tx_ring_size; i++)
-                       mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
-               kfree(ring->tx_buf);
-               ring->tx_buf = NULL;
-       }
-
-       if (ring->tx_dma) {
-               dma_free_coherent(eth->dev,
-                                 ring->tx_ring_size * sizeof(*ring->tx_dma),
-                                 ring->tx_dma,
-                                 ring->tx_phys);
-               ring->tx_dma = NULL;
-       }
-}
-
-static void mtk_qdma_tx_clean(struct mtk_eth *eth)
-{
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       int i;
-
-       if (ring->tx_buf) {
-               for (i = 0; i < ring->tx_ring_size; i++)
-                       mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
-               kfree(ring->tx_buf);
-               ring->tx_buf = NULL;
-       }
-
-       if (ring->tx_dma) {
-               dma_free_coherent(eth->dev,
-                                 ring->tx_ring_size * sizeof(*ring->tx_dma),
-                                 ring->tx_dma,
-                                 ring->tx_phys);
-               ring->tx_dma = NULL;
-       }
-}
-
-void mtk_stats_update_mac(struct mtk_mac *mac)
-{
-       struct mtk_hw_stats *hw_stats = mac->hw_stats;
-       unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
-       u64 stats;
-
-       base += hw_stats->reg_offset;
-
-       u64_stats_update_begin(&hw_stats->syncp);
-
-       if (mac->hw->soc->new_stats) {
-               hw_stats->rx_bytes += mtk_r32(mac->hw, base);
-               stats = mtk_r32(mac->hw, base + 0x04);
-               if (stats)
-                       hw_stats->rx_bytes += (stats << 32);
-               hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
-               hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
-               hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
-               hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
-               hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
-               hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
-               hw_stats->rx_flow_control_packets +=
-                                               mtk_r32(mac->hw, base + 0x24);
-               hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
-               hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
-               hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
-               stats = mtk_r32(mac->hw, base + 0x34);
-               if (stats)
-                       hw_stats->tx_bytes += (stats << 32);
-               hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
-       } else {
-               hw_stats->tx_bytes += mtk_r32(mac->hw, base);
-               hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x04);
-               hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x08);
-               hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x0c);
-               hw_stats->rx_bytes += mtk_r32(mac->hw, base + 0x20);
-               hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x24);
-               hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x28);
-               hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x2c);
-               hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x30);
-               hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x34);
-               hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x38);
-               hw_stats->rx_flow_control_packets +=
-                                               mtk_r32(mac->hw, base + 0x3c);
-       }
-
-       u64_stats_update_end(&hw_stats->syncp);
-}
-
-static void mtk_get_stats64(struct net_device *dev,
-                           struct rtnl_link_stats64 *storage)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_hw_stats *hw_stats = mac->hw_stats;
-       unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
-       unsigned int start;
-
-       if (!base) {
-               netdev_stats_to_stats64(storage, &dev->stats);
-               return;
-       }
-
-       if (netif_running(dev) && netif_device_present(dev)) {
-               if (spin_trylock(&hw_stats->stats_lock)) {
-                       mtk_stats_update_mac(mac);
-                       spin_unlock(&hw_stats->stats_lock);
-               }
-       }
-
-       do {
-               start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
-               storage->rx_packets = hw_stats->rx_packets;
-               storage->tx_packets = hw_stats->tx_packets;
-               storage->rx_bytes = hw_stats->rx_bytes;
-               storage->tx_bytes = hw_stats->tx_bytes;
-               storage->collisions = hw_stats->tx_collisions;
-               storage->rx_length_errors = hw_stats->rx_short_errors +
-                       hw_stats->rx_long_errors;
-               storage->rx_over_errors = hw_stats->rx_overflow;
-               storage->rx_crc_errors = hw_stats->rx_fcs_errors;
-               storage->rx_errors = hw_stats->rx_checksum_errors;
-               storage->tx_aborted_errors = hw_stats->tx_skip;
-       } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
-
-       storage->tx_errors = dev->stats.tx_errors;
-       storage->rx_dropped = dev->stats.rx_dropped;
-       storage->tx_dropped = dev->stats.tx_dropped;
-}
-
-static int mtk_vlan_rx_add_vid(struct net_device *dev,
-                              __be16 proto, u16 vid)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       u32 idx = (vid & 0xf);
-       u32 vlan_cfg;
-
-       if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
-             (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
-               return 0;
-
-       if (test_bit(idx, &eth->vlan_map)) {
-               netdev_warn(dev, "disable tx vlan offload\n");
-               dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
-               netdev_update_features(dev);
-       } else {
-               vlan_cfg = mtk_r32(eth,
-                                  mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
-                                  ((idx >> 1) << 2));
-               if (idx & 0x1) {
-                       vlan_cfg &= 0xffff;
-                       vlan_cfg |= (vid << 16);
-               } else {
-                       vlan_cfg &= 0xffff0000;
-                       vlan_cfg |= vid;
-               }
-               mtk_w32(eth,
-                       vlan_cfg, mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
-                       ((idx >> 1) << 2));
-               set_bit(idx, &eth->vlan_map);
-       }
-
-       return 0;
-}
-
-static int mtk_vlan_rx_kill_vid(struct net_device *dev,
-                               __be16 proto, u16 vid)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       u32 idx = (vid & 0xf);
-
-       if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
-             (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
-               return 0;
-
-       clear_bit(idx, &eth->vlan_map);
-
-       return 0;
-}
-
-static inline u32 mtk_pdma_empty_txd(struct mtk_tx_ring *ring)
-{
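-       /* compiler barrier: force a fresh read of the ring indices */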
-       barrier();
-       return (u32)(ring->tx_ring_size -
-                    ((ring->tx_next_idx - ring->tx_free_idx) &
-                     (ring->tx_ring_size - 1)));
-}
-
-static int mtk_skb_padto(struct sk_buff *skb, struct mtk_eth *eth)
-{
-       unsigned int len;
-       int ret;
-
-       if (unlikely(skb->len >= VLAN_ETH_ZLEN))
-               return 0;
-
-       if (eth->soc->padding_64b && !eth->soc->padding_bug)
-               return 0;
-
-       if (skb_vlan_tag_present(skb))
-               len = ETH_ZLEN;
-       else if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
-               len = VLAN_ETH_ZLEN;
-       else if (!eth->soc->padding_64b)
-               len = ETH_ZLEN;
-       else
-               return 0;
-
-       if (skb->len >= len)
-               return 0;
-
-       ret = skb_pad(skb, len - skb->len);
-       if (ret < 0)
-               return ret;
-       skb->len = len;
-       skb_set_tail_pointer(skb, len);
-
-       return ret;
-}
-
-static int mtk_pdma_tx_map(struct sk_buff *skb, struct net_device *dev,
-                          int tx_num, struct mtk_tx_ring *ring, bool gso)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       struct skb_frag_struct *frag;
-       struct mtk_tx_dma txd, *ptxd;
-       struct mtk_tx_buf *tx_buf;
-       int i, j, k, frag_size, frag_map_size, offset;
-       dma_addr_t mapped_addr;
-       unsigned int nr_frags;
-       u32 def_txd4;
-
-       if (mtk_skb_padto(skb, eth)) {
-               netif_warn(eth, tx_err, dev, "tx padding failed!\n");
-               return -1;
-       }
-
-       tx_buf = &ring->tx_buf[ring->tx_next_idx];
-       memset(tx_buf, 0, sizeof(*tx_buf));
-       memset(&txd, 0, sizeof(txd));
-       nr_frags = skb_shinfo(skb)->nr_frags;
-
-       /* init tx descriptor */
-       def_txd4 = eth->soc->txd4;
-       txd.txd4 = def_txd4;
-
-       if (eth->soc->mac_count > 1)
-               txd.txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
-
-       if (gso)
-               txd.txd4 |= TX_DMA_TSO;
-
-       /* TX Checksum offload */
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
-               txd.txd4 |= TX_DMA_CHKSUM;
-
-       /* VLAN header offload */
-       if (skb_vlan_tag_present(skb)) {
-               u16 tag = skb_vlan_tag_get(skb);
-
-               txd.txd4 |= TX_DMA_INS_VLAN |
-                       ((tag >> VLAN_PRIO_SHIFT) << 4) |
-                       (tag & 0xF);
-       }
-
-       mapped_addr = dma_map_single(&dev->dev, skb->data,
-                                    skb_headlen(skb), DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
-               return -1;
-
-       txd.txd1 = mapped_addr;
-       txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb));
-
-       tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
-       dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
-       dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
-
-       /* TX SG offload */
-       j = ring->tx_next_idx;
-       k = 0;
-       for (i = 0; i < nr_frags; i++) {
-               offset = 0;
-               frag = &skb_shinfo(skb)->frags[i];
-               frag_size = skb_frag_size(frag);
-
-               while (frag_size > 0) {
-                       frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
-                       mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
-                                                      frag_map_size,
-                                                      DMA_TO_DEVICE);
-                       if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
-                               goto err_dma;
-
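-                       /* each PDMA descriptor carries two buffers: even k
-                        * fills the second slot (txd3/PLEN1) of the current
-                        * descriptor, odd k moves on to the next descriptor
-                        * (txd1/PLEN0)
-                        */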
-                       if (k & 0x1) {
-                               j = NEXT_TX_DESP_IDX(j);
-                               txd.txd1 = mapped_addr;
-                               txd.txd2 = TX_DMA_PLEN0(frag_map_size);
-                               txd.txd4 = def_txd4;
-
-                               tx_buf = &ring->tx_buf[j];
-                               memset(tx_buf, 0, sizeof(*tx_buf));
-
-                               tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
-                               dma_unmap_addr_set(tx_buf, dma_addr0,
-                                                  mapped_addr);
-                               dma_unmap_len_set(tx_buf, dma_len0,
-                                                 frag_map_size);
-                       } else {
-                               txd.txd3 = mapped_addr;
-                               txd.txd2 |= TX_DMA_PLEN1(frag_map_size);
-
-                               tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
-                               tx_buf->flags |= MTK_TX_FLAGS_PAGE1;
-                               dma_unmap_addr_set(tx_buf, dma_addr1,
-                                                  mapped_addr);
-                               dma_unmap_len_set(tx_buf, dma_len1,
-                                                 frag_map_size);
-
-                               if (!((i == (nr_frags - 1)) &&
-                                     (frag_map_size == frag_size))) {
-                                       mtk_set_txd_pdma(&txd,
-                                                        &ring->tx_dma[j]);
-                                       memset(&txd, 0, sizeof(txd));
-                               }
-                       }
-                       frag_size -= frag_map_size;
-                       offset += frag_map_size;
-                       k++;
-               }
-       }
-
-       /* set last segment */
-       if (k & 0x1)
-               txd.txd2 |= TX_DMA_LS1;
-       else
-               txd.txd2 |= TX_DMA_LS0;
-       mtk_set_txd_pdma(&txd, &ring->tx_dma[j]);
-
-       /* store the skb for cleanup */
-       tx_buf->skb = skb;
-
-       netdev_sent_queue(dev, skb->len);
-       skb_tx_timestamp(skb);
-
-       ring->tx_next_idx = NEXT_TX_DESP_IDX(j);
-       /* make sure that all changes to the dma ring are flushed before we
-        * continue
-        */
-       wmb();
-       atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
-
-       if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
-               mtk_reg_w32(eth, ring->tx_next_idx, MTK_REG_TX_CTX_IDX0);
-
-       return 0;
-
-err_dma:
-       j = ring->tx_next_idx;
-       for (i = 0; i < tx_num; i++) {
-               ptxd = &ring->tx_dma[j];
-               tx_buf = &ring->tx_buf[j];
-
-               /* unmap dma */
-               mtk_txd_unmap(&dev->dev, tx_buf);
-
-               ptxd->txd2 = TX_DMA_DESP2_DEF;
-               j = NEXT_TX_DESP_IDX(j);
-       }
-       /* make sure that all changes to the dma ring are flushed before we
-        * continue
-        */
-       wmb();
-       return -1;
-}
-
-/* the qdma core needs scratch memory to be set up */
-static int mtk_init_fq_dma(struct mtk_eth *eth)
-{
-       dma_addr_t dma_addr, phy_ring_head, phy_ring_tail;
-       int cnt = eth->soc->dma_ring_size;
-       int i;
-
-       eth->scratch_ring = dma_alloc_coherent(eth->dev,
-                                              cnt * sizeof(struct mtk_tx_dma),
-                                              &phy_ring_head,
-                                              GFP_ATOMIC | __GFP_ZERO);
-       if (unlikely(!eth->scratch_ring))
-               return -ENOMEM;
-
-       eth->scratch_head = kcalloc(cnt, QDMA_PAGE_SIZE,
-                                   GFP_KERNEL);
-       if (unlikely(!eth->scratch_head))
-               return -ENOMEM;
-
-       dma_addr = dma_map_single(eth->dev,
-                                 eth->scratch_head, cnt * QDMA_PAGE_SIZE,
-                                 DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
-               return -ENOMEM;
-
-       memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
-       phy_ring_tail = phy_ring_head + (sizeof(struct mtk_tx_dma) * (cnt - 1));
-
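-       /* chain the descriptors and point each one at its scratch page */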
-       for (i = 0; i < cnt; i++) {
-               eth->scratch_ring[i].txd1 = (dma_addr + (i * QDMA_PAGE_SIZE));
-               if (i < cnt - 1)
-                       eth->scratch_ring[i].txd2 = (phy_ring_head +
-                               ((i + 1) * sizeof(struct mtk_tx_dma)));
-               eth->scratch_ring[i].txd3 = TX_QDMA_SDL(QDMA_PAGE_SIZE);
-       }
-
-       mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
-       mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
-       mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
-       mtk_w32(eth, QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
-
-       return 0;
-}
-
-static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
-{
-       void *ret = ring->tx_dma;
-
-       return ret + (desc - ring->tx_phys);
-}
-
-static struct mtk_tx_dma *mtk_tx_next_qdma(struct mtk_tx_ring *ring,
-                                          struct mtk_tx_dma *txd)
-{
-       return mtk_qdma_phys_to_virt(ring, txd->txd2);
-}
-
-static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
-                                            struct mtk_tx_dma *txd)
-{
-       int idx = txd - ring->tx_dma;
-
-       return &ring->tx_buf[idx];
-}
-
-static int mtk_qdma_tx_map(struct sk_buff *skb, struct net_device *dev,
-                          int tx_num, struct mtk_tx_ring *ring, bool gso)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       struct mtk_tx_dma *itxd, *txd;
-       struct mtk_tx_buf *tx_buf;
-       dma_addr_t mapped_addr;
-       unsigned int nr_frags;
-       int i, n_desc = 1;
-       u32 txd4 = eth->soc->txd4;
-
-       itxd = ring->tx_next_free;
-       if (itxd == ring->tx_last_free)
-               return -ENOMEM;
-
-       if (eth->soc->mac_count > 1)
-               txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
-
-       tx_buf = mtk_desc_to_tx_buf(ring, itxd);
-       memset(tx_buf, 0, sizeof(*tx_buf));
-
-       if (gso)
-               txd4 |= TX_DMA_TSO;
-
-       /* TX Checksum offload */
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
-               txd4 |= TX_DMA_CHKSUM;
-
-       /* VLAN header offload */
-       if (skb_vlan_tag_present(skb))
-               txd4 |= TX_DMA_INS_VLAN_MT7621 | skb_vlan_tag_get(skb);
-
-       mapped_addr = dma_map_single(&dev->dev, skb->data,
-                                    skb_headlen(skb), DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
-               return -ENOMEM;
-
-       WRITE_ONCE(itxd->txd1, mapped_addr);
-       tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
-       dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
-       dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
-
-       /* TX SG offload */
-       txd = itxd;
-       nr_frags = skb_shinfo(skb)->nr_frags;
-       for (i = 0; i < nr_frags; i++) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
-               unsigned int offset = 0;
-               int frag_size = skb_frag_size(frag);
-
-               while (frag_size) {
-                       bool last_frag = false;
-                       unsigned int frag_map_size;
-
-                       txd = mtk_tx_next_qdma(ring, txd);
-                       if (txd == ring->tx_last_free)
-                               goto err_dma;
-
-                       n_desc++;
-                       frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
-                       mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
-                                                      frag_map_size,
-                                                      DMA_TO_DEVICE);
-                       if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
-                               goto err_dma;
-
-                       if (i == nr_frags - 1 &&
-                           (frag_size - frag_map_size) == 0)
-                               last_frag = true;
-
-                       WRITE_ONCE(txd->txd1, mapped_addr);
-                       WRITE_ONCE(txd->txd3, (QDMA_TX_SWC |
-                                              TX_DMA_PLEN0(frag_map_size) |
-                                              last_frag * TX_DMA_LS0) |
-                                              mac->id);
-                       WRITE_ONCE(txd->txd4, 0);
-
-                       tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
-                       tx_buf = mtk_desc_to_tx_buf(ring, txd);
-                       memset(tx_buf, 0, sizeof(*tx_buf));
-
-                       tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
-                       dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
-                       dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
-                       frag_size -= frag_map_size;
-                       offset += frag_map_size;
-               }
-       }
-
-       /* store the skb for cleanup */
-       tx_buf->skb = skb;
-
-       WRITE_ONCE(itxd->txd4, txd4);
-       WRITE_ONCE(itxd->txd3, (QDMA_TX_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
-                               (!nr_frags * TX_DMA_LS0)));
-
-       netdev_sent_queue(dev, skb->len);
-       skb_tx_timestamp(skb);
-
-       ring->tx_next_free = mtk_tx_next_qdma(ring, txd);
-       atomic_sub(n_desc, &ring->tx_free_count);
-
-       /* make sure that all changes to the dma ring are flushed before we
-        * continue
-        */
-       wmb();
-
-       if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
-               mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
-
-       return 0;
-
-err_dma:
-       do {
-               tx_buf = mtk_desc_to_tx_buf(ring, txd);
-
-               /* unmap dma */
-               mtk_txd_unmap(&dev->dev, tx_buf);
-
-               itxd->txd3 = TX_DMA_DESP2_DEF;
-               itxd = mtk_tx_next_qdma(ring, itxd);
-       } while (itxd != txd);
-
-       return -ENOMEM;
-}
-
-static inline int mtk_cal_txd_req(struct sk_buff *skb)
-{
-       int i, nfrags;
-       struct skb_frag_struct *frag;
-
-       nfrags = 1;
-       if (skb_is_gso(skb)) {
-               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                       frag = &skb_shinfo(skb)->frags[i];
-                       nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN);
-               }
-       } else {
-               nfrags += skb_shinfo(skb)->nr_frags;
-       }
-
-       return DIV_ROUND_UP(nfrags, 2);
-}
-
-static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       struct net_device_stats *stats = &dev->stats;
-       int tx_num;
-       int len = skb->len;
-       bool gso = false;
-
-       tx_num = mtk_cal_txd_req(skb);
-       if (unlikely(atomic_read(&ring->tx_free_count) <= tx_num)) {
-               netif_stop_queue(dev);
-               netif_err(eth, tx_queued, dev,
-                         "Tx Ring full when queue awake!\n");
-               return NETDEV_TX_BUSY;
-       }
-
-       /* TSO: fill MSS info in tcp checksum field */
-       if (skb_is_gso(skb)) {
-               if (skb_cow_head(skb, 0)) {
-                       netif_warn(eth, tx_err, dev,
-                                  "GSO expand head fail.\n");
-                       goto drop;
-               }
-
-               if (skb_shinfo(skb)->gso_type &
-                               (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
-                       gso = true;
-                       tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
-               }
-       }
-
-       if (ring->tx_map(skb, dev, tx_num, ring, gso) < 0)
-               goto drop;
-
-       stats->tx_packets++;
-       stats->tx_bytes += len;
-
-       if (unlikely(atomic_read(&ring->tx_free_count) <= ring->tx_thresh)) {
-               netif_stop_queue(dev);
-               smp_mb();
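-               /* the tx poll path may have freed descriptors after the
-                * check above; re-check before leaving the queue stopped
-                */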
-               if (unlikely(atomic_read(&ring->tx_free_count) >
-                            ring->tx_thresh))
-                       netif_wake_queue(dev);
-       }
-
-       return NETDEV_TX_OK;
-
-drop:
-       stats->tx_dropped++;
-       dev_kfree_skb(skb);
-       return NETDEV_TX_OK;
-}
-
-static int mtk_poll_rx(struct napi_struct *napi, int budget,
-                      struct mtk_eth *eth, u32 rx_intr)
-{
-       struct mtk_soc_data *soc = eth->soc;
-       struct mtk_rx_ring *ring = &eth->rx_ring[0];
-       int idx = ring->rx_calc_idx;
-       u32 checksum_bit;
-       struct sk_buff *skb;
-       u8 *data, *new_data;
-       struct mtk_rx_dma *rxd, trxd;
-       int done = 0, pad;
-
-       if (eth->soc->hw_features & NETIF_F_RXCSUM)
-               checksum_bit = soc->checksum_bit;
-       else
-               checksum_bit = 0;
-
-       if (eth->soc->rx_2b_offset)
-               pad = 0;
-       else
-               pad = NET_IP_ALIGN;
-
-       while (done < budget) {
-               struct net_device *netdev;
-               unsigned int pktlen;
-               dma_addr_t dma_addr;
-               int mac = 0;
-
-               idx = NEXT_RX_DESP_IDX(idx);
-               rxd = &ring->rx_dma[idx];
-               data = ring->rx_data[idx];
-
-               mtk_get_rxd(&trxd, rxd);
-               if (!(trxd.rxd2 & RX_DMA_DONE))
-                       break;
-
-               /* find out which mac the packet comes from. values start at 1 */
-               if (eth->soc->mac_count > 1) {
-                       mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
-                             RX_DMA_FPORT_MASK;
-                       mac--;
-                       if (mac < 0 || mac >= eth->soc->mac_count)
-                               goto release_desc;
-               }
-
-               netdev = eth->netdev[mac];
-
-               if (unlikely(!netdev))
-                       goto release_desc;
-
-               /* alloc new buffer */
-               new_data = napi_alloc_frag(ring->frag_size);
-               if (unlikely(!new_data)) {
-                       netdev->stats.rx_dropped++;
-                       goto release_desc;
-               }
-               dma_addr = dma_map_single(&netdev->dev,
-                                         new_data + NET_SKB_PAD + pad,
-                                         ring->rx_buf_size,
-                                         DMA_FROM_DEVICE);
-               if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
-                       skb_free_frag(new_data);
-                       goto release_desc;
-               }
-
-               /* receive data */
-               skb = build_skb(data, ring->frag_size);
-               if (unlikely(!skb)) {
-                       put_page(virt_to_head_page(new_data));
-                       goto release_desc;
-               }
-               skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
-
-               dma_unmap_single(&netdev->dev, trxd.rxd1,
-                                ring->rx_buf_size, DMA_FROM_DEVICE);
-               pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
-               skb->dev = netdev;
-               skb_put(skb, pktlen);
-               if (trxd.rxd4 & checksum_bit)
-                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-               else
-                       skb_checksum_none_assert(skb);
-               skb->protocol = eth_type_trans(skb, netdev);
-
-               netdev->stats.rx_packets++;
-               netdev->stats.rx_bytes += pktlen;
-
-               if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
-                   RX_DMA_VID(trxd.rxd3))
-                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
-                                              RX_DMA_VID(trxd.rxd3));
-               napi_gro_receive(napi, skb);
-
-               ring->rx_data[idx] = new_data;
-               rxd->rxd1 = (unsigned int)dma_addr;
-
-release_desc:
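-               /* rearm the descriptor so the dma engine can reuse it */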
-               if (eth->soc->rx_sg_dma)
-                       rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
-               else
-                       rxd->rxd2 = RX_DMA_LSO;
-
-               ring->rx_calc_idx = idx;
-               /* make sure that all changes to the dma ring are flushed before
-                * we continue
-                */
-               wmb();
-               if (eth->soc->dma_type == MTK_QDMA)
-                       mtk_w32(eth, ring->rx_calc_idx, MTK_QRX_CRX_IDX0);
-               else
-                       mtk_reg_w32(eth, ring->rx_calc_idx,
-                                   MTK_REG_RX_CALC_IDX0);
-               done++;
-       }
-
-       if (done < budget)
-               mtk_irq_ack(eth, rx_intr);
-
-       return done;
-}
-
-static int mtk_pdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
-{
-       struct sk_buff *skb;
-       struct mtk_tx_buf *tx_buf;
-       int done = 0;
-       u32 idx, hwidx;
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       unsigned int bytes = 0;
-
-       idx = ring->tx_free_idx;
-       hwidx = mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0);
-
-       while ((idx != hwidx) && budget) {
-               tx_buf = &ring->tx_buf[idx];
-               skb = tx_buf->skb;
-
-               if (!skb)
-                       break;
-
-               if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
-                       bytes += skb->len;
-                       done++;
-                       budget--;
-               }
-               mtk_txd_unmap(eth->dev, tx_buf);
-               idx = NEXT_TX_DESP_IDX(idx);
-       }
-       ring->tx_free_idx = idx;
-       atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
-
-       /* read the hw index again to make sure no new tx packets arrived */
-       if (idx != hwidx || idx != mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0))
-               *tx_again = true;
-
-       if (done)
-               netdev_completed_queue(*eth->netdev, done, bytes);
-
-       return done;
-}
-
-static int mtk_qdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
-{
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       struct mtk_tx_dma *desc;
-       struct sk_buff *skb;
-       struct mtk_tx_buf *tx_buf;
-       int total = 0, done[MTK_MAX_DEVS];
-       unsigned int bytes[MTK_MAX_DEVS];
-       u32 cpu, dma;
-       int i;
-
-       memset(done, 0, sizeof(done));
-       memset(bytes, 0, sizeof(bytes));
-
-       cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
-       dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
-
-       desc = mtk_qdma_phys_to_virt(ring, cpu);
-
-       while ((cpu != dma) && budget) {
-               u32 next_cpu = desc->txd2;
-               int mac;
-
-               desc = mtk_tx_next_qdma(ring, desc);
-               if ((desc->txd3 & QDMA_TX_OWNER_CPU) == 0)
-                       break;
-
-               mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
-                      TX_DMA_FPORT_MASK;
-               mac--;
-
-               tx_buf = mtk_desc_to_tx_buf(ring, desc);
-               skb = tx_buf->skb;
-               if (!skb)
-                       break;
-
-               if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
-                       bytes[mac] += skb->len;
-                       done[mac]++;
-                       budget--;
-               }
-               mtk_txd_unmap(eth->dev, tx_buf);
-
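-               /* stitch the reclaimed descriptor back onto the free list */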
-               ring->tx_last_free->txd2 = next_cpu;
-               ring->tx_last_free = desc;
-               atomic_inc(&ring->tx_free_count);
-
-               cpu = next_cpu;
-       }
-
-       mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
-
-       /* read the hw index again to make sure no new tx packets arrived */
-       if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
-               *tx_again = true;
-
-       for (i = 0; i < eth->soc->mac_count; i++) {
-               if (!done[i])
-                       continue;
-               netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
-               total += done[i];
-       }
-
-       return total;
-}
-
-static int mtk_poll_tx(struct mtk_eth *eth, int budget, u32 tx_intr,
-                      bool *tx_again)
-{
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       struct net_device *netdev = eth->netdev[0];
-       int done;
-
-       done = eth->tx_ring.tx_poll(eth, budget, tx_again);
-       if (!*tx_again)
-               mtk_irq_ack(eth, tx_intr);
-
-       if (!done)
-               return 0;
-
-       smp_mb();
-       if (unlikely(!netif_queue_stopped(netdev)))
-               return done;
-
-       if (atomic_read(&ring->tx_free_count) > ring->tx_thresh)
-               netif_wake_queue(netdev);
-
-       return done;
-}
-
-static void mtk_stats_update(struct mtk_eth *eth)
-{
-       int i;
-
-       for (i = 0; i < eth->soc->mac_count; i++) {
-               if (!eth->mac[i] || !eth->mac[i]->hw_stats)
-                       continue;
-               if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
-                       mtk_stats_update_mac(eth->mac[i]);
-                       spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
-               }
-       }
-}
-
-static int mtk_poll(struct napi_struct *napi, int budget)
-{
-       struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
-       u32 status, mtk_status, mask, tx_intr, rx_intr, status_intr;
-       int tx_done, rx_done;
-       bool tx_again = false;
-
-       status = mtk_irq_pending(eth);
-       mtk_status = mtk_irq_pending_status(eth);
-       tx_intr = eth->soc->tx_int;
-       rx_intr = eth->soc->rx_int;
-       status_intr = eth->soc->status_int;
-       tx_done = 0;
-       rx_done = 0;
-       tx_again = false;
-
-       if (status & tx_intr)
-               tx_done = mtk_poll_tx(eth, budget, tx_intr, &tx_again);
-
-       if (status & rx_intr)
-               rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);
-
-       if (unlikely(mtk_status & status_intr)) {
-               mtk_stats_update(eth);
-               mtk_irq_ack_status(eth, status_intr);
-       }
-
-       if (unlikely(netif_msg_intr(eth))) {
-               mask = mtk_irq_enabled(eth);
-               netdev_info(eth->netdev[0],
-                           "done tx %d, rx %d, intr 0x%08x/0x%x\n",
-                           tx_done, rx_done, status, mask);
-       }
-
-       if (tx_again || rx_done == budget)
-               return budget;
-
-       status = mtk_irq_pending(eth);
-       if (status & (tx_intr | rx_intr))
-               return budget;
-
-       napi_complete(napi);
-       mtk_irq_enable(eth, tx_intr | rx_intr);
-
-       return rx_done;
-}
-
-static int mtk_pdma_tx_alloc(struct mtk_eth *eth)
-{
-       int i;
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-
-       ring->tx_ring_size = eth->soc->dma_ring_size;
-       ring->tx_free_idx = 0;
-       ring->tx_next_idx = 0;
-       ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
-                             MAX_SKB_FRAGS);
-
-       ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
-                              GFP_KERNEL);
-       if (!ring->tx_buf)
-               goto no_tx_mem;
-
-       ring->tx_dma =
-               dma_alloc_coherent(eth->dev,
-                                  ring->tx_ring_size * sizeof(*ring->tx_dma),
-                                  &ring->tx_phys, GFP_ATOMIC | __GFP_ZERO);
-       if (!ring->tx_dma)
-               goto no_tx_mem;
-
-       for (i = 0; i < ring->tx_ring_size; i++) {
-               ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF;
-               ring->tx_dma[i].txd4 = eth->soc->txd4;
-       }
-
-       atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
-       ring->tx_map = mtk_pdma_tx_map;
-       ring->tx_poll = mtk_pdma_tx_poll;
-       ring->tx_clean = mtk_pdma_tx_clean;
-
-       /* make sure that all changes to the dma ring are flushed before we
-        * continue
-        */
-       wmb();
-
-       mtk_reg_w32(eth, ring->tx_phys, MTK_REG_TX_BASE_PTR0);
-       mtk_reg_w32(eth, ring->tx_ring_size, MTK_REG_TX_MAX_CNT0);
-       mtk_reg_w32(eth, 0, MTK_REG_TX_CTX_IDX0);
-       mtk_reg_w32(eth, MTK_PST_DTX_IDX0, MTK_REG_PDMA_RST_CFG);
-
-       return 0;
-
-no_tx_mem:
-       return -ENOMEM;
-}
-
-static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth)
-{
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       int i, sz = sizeof(*ring->tx_dma);
-
-       ring->tx_ring_size = eth->soc->dma_ring_size;
-       ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
-                              GFP_KERNEL);
-       if (!ring->tx_buf)
-               goto no_tx_mem;
-
-       ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz,
-                                         &ring->tx_phys,
-                                         GFP_ATOMIC | __GFP_ZERO);
-       if (!ring->tx_dma)
-               goto no_tx_mem;
-
-       for (i = 0; i < ring->tx_ring_size; i++) {
-               int next = (i + 1) % ring->tx_ring_size;
-               u32 next_ptr = ring->tx_phys + next * sz;
-
-               ring->tx_dma[i].txd2 = next_ptr;
-               ring->tx_dma[i].txd3 = TX_DMA_DESP2_DEF;
-       }
-
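-       /* keep two descriptors in reserve so tx_next_free never catches
-        * up with tx_last_free while the ring is in use
-        */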
-       atomic_set(&ring->tx_free_count, ring->tx_ring_size - 2);
-       ring->tx_next_free = &ring->tx_dma[0];
-       ring->tx_last_free = &ring->tx_dma[ring->tx_ring_size - 2];
-       ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
-                             MAX_SKB_FRAGS);
-
-       ring->tx_map = mtk_qdma_tx_map;
-       ring->tx_poll = mtk_qdma_tx_poll;
-       ring->tx_clean = mtk_qdma_tx_clean;
-
-       /* make sure that all changes to the dma ring are flushed before we
-        * continue
-        */
-       wmb();
-
-       mtk_w32(eth, ring->tx_phys, MTK_QTX_CTX_PTR);
-       mtk_w32(eth, ring->tx_phys, MTK_QTX_DTX_PTR);
-       mtk_w32(eth,
-               ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
-               MTK_QTX_CRX_PTR);
-       mtk_w32(eth,
-               ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
-               MTK_QTX_DRX_PTR);
-
-       return 0;
-
-no_tx_mem:
-       return -ENOMEM;
-}
-
-static int mtk_qdma_init(struct mtk_eth *eth, int ring)
-{
-       int err;
-
-       err = mtk_init_fq_dma(eth);
-       if (err)
-               return err;
-
-       err = mtk_qdma_tx_alloc_tx(eth);
-       if (err)
-               return err;
-
-       err = mtk_dma_rx_alloc(eth, &eth->rx_ring[ring]);
-       if (err)
-               return err;
-
-       mtk_w32(eth, eth->rx_ring[ring].rx_phys, MTK_QRX_BASE_PTR0);
-       mtk_w32(eth, eth->rx_ring[ring].rx_ring_size, MTK_QRX_MAX_CNT0);
-       mtk_w32(eth, eth->rx_ring[ring].rx_calc_idx, MTK_QRX_CRX_IDX0);
-       mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
-       mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
-
-       /* Enable random early drop and set drop threshold automatically */
-       mtk_w32(eth, 0x174444, MTK_QDMA_FC_THRES);
-       mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
-
-       return 0;
-}
-
-static int mtk_pdma_qdma_init(struct mtk_eth *eth)
-{
-       int err = mtk_qdma_init(eth, 1);
-
-       if (err)
-               return err;
-
-       err = mtk_dma_rx_alloc(eth, &eth->rx_ring[0]);
-       if (err)
-               return err;
-
-       mtk_reg_w32(eth, eth->rx_ring[0].rx_phys, MTK_REG_RX_BASE_PTR0);
-       mtk_reg_w32(eth, eth->rx_ring[0].rx_ring_size, MTK_REG_RX_MAX_CNT0);
-       mtk_reg_w32(eth, eth->rx_ring[0].rx_calc_idx, MTK_REG_RX_CALC_IDX0);
-       mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);
-
-       return 0;
-}
-
-static int mtk_pdma_init(struct mtk_eth *eth)
-{
-       struct mtk_rx_ring *ring = &eth->rx_ring[0];
-       int err;
-
-       err = mtk_pdma_tx_alloc(eth);
-       if (err)
-               return err;
-
-       err = mtk_dma_rx_alloc(eth, ring);
-       if (err)
-               return err;
-
-       mtk_reg_w32(eth, ring->rx_phys, MTK_REG_RX_BASE_PTR0);
-       mtk_reg_w32(eth, ring->rx_ring_size, MTK_REG_RX_MAX_CNT0);
-       mtk_reg_w32(eth, ring->rx_calc_idx, MTK_REG_RX_CALC_IDX0);
-       mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);
-
-       return 0;
-}
-
-static void mtk_dma_free(struct mtk_eth *eth)
-{
-       int i;
-
-       for (i = 0; i < eth->soc->mac_count; i++)
-               if (eth->netdev[i])
-                       netdev_reset_queue(eth->netdev[i]);
-       eth->tx_ring.tx_clean(eth);
-       mtk_clean_rx(eth, &eth->rx_ring[0]);
-       mtk_clean_rx(eth, &eth->rx_ring[1]);
-       kfree(eth->scratch_head);
-}
-
-static void mtk_tx_timeout(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-
-       eth->netdev[mac->id]->stats.tx_errors++;
-       netif_err(eth, tx_err, dev,
-                 "transmit timed out\n");
-       if (eth->soc->dma_type & MTK_PDMA) {
-               netif_info(eth, drv, dev, "pdma_cfg:%08x\n",
-                          mtk_reg_r32(eth, MTK_REG_PDMA_GLO_CFG));
-               netif_info(eth, drv, dev,
-                          "tx_ring=%d, base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n",
-                          0, mtk_reg_r32(eth, MTK_REG_TX_BASE_PTR0),
-                          mtk_reg_r32(eth, MTK_REG_TX_MAX_CNT0),
-                          mtk_reg_r32(eth, MTK_REG_TX_CTX_IDX0),
-                          mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0),
-                          ring->tx_free_idx,
-                          ring->tx_next_idx);
-       }
-       if (eth->soc->dma_type & MTK_QDMA) {
-               netif_info(eth, drv, dev, "qdma_cfg:%08x\n",
-                          mtk_r32(eth, MTK_QDMA_GLO_CFG));
-               netif_info(eth, drv, dev,
-                          "tx_ring=%d, ctx=%08x, dtx=%08x, crx=%08x, drx=%08x, free=%hu\n",
-                          0, mtk_r32(eth, MTK_QTX_CTX_PTR),
-                          mtk_r32(eth, MTK_QTX_DTX_PTR),
-                          mtk_r32(eth, MTK_QTX_CRX_PTR),
-                          mtk_r32(eth, MTK_QTX_DRX_PTR),
-                          atomic_read(&ring->tx_free_count));
-       }
-       netif_info(eth, drv, dev,
-                  "rx_ring=%d, base=%08x, max=%u, calc=%u, drx=%u\n",
-                  0, mtk_reg_r32(eth, MTK_REG_RX_BASE_PTR0),
-                  mtk_reg_r32(eth, MTK_REG_RX_MAX_CNT0),
-                  mtk_reg_r32(eth, MTK_REG_RX_CALC_IDX0),
-                  mtk_reg_r32(eth, MTK_REG_RX_DRX_IDX0));
-
-       schedule_work(&mac->pending_work);
-}
-
-static irqreturn_t mtk_handle_irq(int irq, void *_eth)
-{
-       struct mtk_eth *eth = _eth;
-       u32 status, int_mask;
-
-       status = mtk_irq_pending(eth);
-       if (unlikely(!status))
-               return IRQ_NONE;
-
-       int_mask = (eth->soc->rx_int | eth->soc->tx_int);
-       if (likely(status & int_mask)) {
-               if (likely(napi_schedule_prep(&eth->rx_napi)))
-                       __napi_schedule(&eth->rx_napi);
-       } else {
-               mtk_irq_ack(eth, status);
-       }
-       mtk_irq_disable(eth, int_mask);
-
-       return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void mtk_poll_controller(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       u32 int_mask = eth->soc->tx_int | eth->soc->rx_int;
-
-       mtk_irq_disable(eth, int_mask);
-       mtk_handle_irq(dev->irq, dev);
-       mtk_irq_enable(eth, int_mask);
-}
-#endif
-
-int mtk_set_clock_cycle(struct mtk_eth *eth)
-{
-       unsigned long sysclk = eth->sysclk;
-
-       sysclk /= MTK_US_CYC_CNT_DIVISOR;
-       sysclk <<= MTK_US_CYC_CNT_SHIFT;
-
-       mtk_w32(eth, (mtk_r32(eth, MTK_GLO_CFG) &
-                       ~(MTK_US_CYC_CNT_MASK << MTK_US_CYC_CNT_SHIFT)) |
-                       sysclk,
-                       MTK_GLO_CFG);
-       return 0;
-}
-
-void mtk_fwd_config(struct mtk_eth *eth)
-{
-       u32 fwd_cfg;
-
-       fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
-
-       /* disable jumbo frame */
-       if (eth->soc->jumbo_frame)
-               fwd_cfg &= ~MTK_GDM1_JMB_EN;
-
-       /* set unicast/multicast/broadcast frame to cpu */
-       fwd_cfg &= ~0xffff;
-
-       mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
-}
-
-void mtk_csum_config(struct mtk_eth *eth)
-{
-       if (eth->soc->hw_features & NETIF_F_RXCSUM)
-               mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) |
-                       (MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
-                       MTK_GDMA1_FWD_CFG);
-       else
-               mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) &
-                       ~(MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
-                       MTK_GDMA1_FWD_CFG);
-       if (eth->soc->hw_features & NETIF_F_IP_CSUM)
-               mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) |
-                       (MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
-                       MTK_CDMA_CSG_CFG);
-       else
-               mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) &
-                       ~(MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
-                       MTK_CDMA_CSG_CFG);
-}
-
-static int mtk_start_dma(struct mtk_eth *eth)
-{
-       unsigned long flags;
-       u32 val;
-       int err;
-
-       if (eth->soc->dma_type == MTK_PDMA)
-               err = mtk_pdma_init(eth);
-       else if (eth->soc->dma_type == MTK_QDMA)
-               err = mtk_qdma_init(eth, 0);
-       else
-               err = mtk_pdma_qdma_init(eth);
-       if (err) {
-               mtk_dma_free(eth);
-               return err;
-       }
-
-       spin_lock_irqsave(&eth->page_lock, flags);
-
-       val = MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN;
-       if (eth->soc->rx_2b_offset)
-               val |= MTK_RX_2B_OFFSET;
-       val |= eth->soc->pdma_glo_cfg;
-
-       if (eth->soc->dma_type & MTK_PDMA)
-               mtk_reg_w32(eth, val, MTK_REG_PDMA_GLO_CFG);
-
-       if (eth->soc->dma_type & MTK_QDMA)
-               mtk_w32(eth, val, MTK_QDMA_GLO_CFG);
-
-       spin_unlock_irqrestore(&eth->page_lock, flags);
-
-       return 0;
-}
-
-static int mtk_open(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-
-       dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
-
-       if (!atomic_read(&eth->dma_refcnt)) {
-               int err = mtk_start_dma(eth);
-
-               if (err)
-                       return err;
-
-               napi_enable(&eth->rx_napi);
-               mtk_irq_enable(eth, eth->soc->tx_int | eth->soc->rx_int);
-       }
-       atomic_inc(&eth->dma_refcnt);
-
-       if (eth->phy)
-               eth->phy->start(mac);
-
-       if (eth->soc->has_carrier && eth->soc->has_carrier(eth))
-               netif_carrier_on(dev);
-
-       netif_start_queue(dev);
-       eth->soc->fwd_config(eth);
-
-       return 0;
-}
-
-static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
-{
-       unsigned long flags;
-       u32 val;
-       int i;
-
-       /* stop the dma engine */
-       spin_lock_irqsave(&eth->page_lock, flags);
-       val = mtk_r32(eth, glo_cfg);
-       mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
-               glo_cfg);
-       spin_unlock_irqrestore(&eth->page_lock, flags);
-
-       /* wait for dma stop */
-       for (i = 0; i < 10; i++) {
-               val = mtk_r32(eth, glo_cfg);
-               if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
-                       msleep(20);
-                       continue;
-               }
-               break;
-       }
-}
-
-static int mtk_stop(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-
-       netif_tx_disable(dev);
-       if (eth->phy)
-               eth->phy->stop(mac);
-
-       if (!atomic_dec_and_test(&eth->dma_refcnt))
-               return 0;
-
-       mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
-       napi_disable(&eth->rx_napi);
-
-       if (eth->soc->dma_type & MTK_PDMA)
-               mtk_stop_dma(eth, mtk_reg_table[MTK_REG_PDMA_GLO_CFG]);
-
-       if (eth->soc->dma_type & MTK_QDMA)
-               mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
-
-       mtk_dma_free(eth);
-
-       return 0;
-}
-
-static int __init mtk_init_hw(struct mtk_eth *eth)
-{
-       int i, err;
-
-       eth->soc->reset_fe(eth);
-
-       if (eth->soc->switch_init)
-               if (eth->soc->switch_init(eth)) {
-                       dev_err(eth->dev, "failed to initialize switch core\n");
-                       return -ENODEV;
-               }
-
-       err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
-                              dev_name(eth->dev), eth);
-       if (err)
-               return err;
-
-       err = mtk_mdio_init(eth);
-       if (err)
-               return err;
-
-       /* disable delayed interrupts and mask the tx/rx interrupts */
-       mtk_reg_w32(eth, 0, MTK_REG_DLY_INT_CFG);
-       if (eth->soc->dma_type & MTK_QDMA)
-               mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
-       mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
-
-       /* frame engine will push VLAN tag according to the VIDX field in Tx desc */
-       if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
-               for (i = 0; i < 16; i += 2)
-                       mtk_w32(eth, ((i + 1) << 16) + i,
-                               mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
-                               (i * 2));
-
-       if (eth->soc->fwd_config(eth))
-               dev_err(eth->dev, "unable to get clock\n");
-
-       if (mtk_reg_table[MTK_REG_MTK_RST_GL]) {
-               mtk_reg_w32(eth, 1, MTK_REG_MTK_RST_GL);
-               mtk_reg_w32(eth, 0, MTK_REG_MTK_RST_GL);
-       }
-
-       return 0;
-}
-
-static int __init mtk_init(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       struct device_node *port;
-       const char *mac_addr;
-       int err;
-
-       mac_addr = of_get_mac_address(mac->of_node);
-       if (mac_addr)
-               ether_addr_copy(dev->dev_addr, mac_addr);
-
-       /* If the mac address is invalid, use a random mac address */
-       if (!is_valid_ether_addr(dev->dev_addr)) {
-               eth_hw_addr_random(dev);
-               dev_err(eth->dev, "generated random MAC address %pM\n",
-                       dev->dev_addr);
-       }
-       mac->hw->soc->set_mac(mac, dev->dev_addr);
-
-       if (eth->soc->port_init)
-               for_each_child_of_node(mac->of_node, port)
-                       if (of_device_is_compatible(port,
-                                                   "mediatek,eth-port") &&
-                           of_device_is_available(port))
-                               eth->soc->port_init(eth, mac, port);
-
-       if (eth->phy) {
-               err = eth->phy->connect(mac);
-               if (err)
-                       return err;
-       }
-
-       return 0;
-}
-
-static void mtk_uninit(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-
-       if (eth->phy)
-               eth->phy->disconnect(mac);
-       mtk_mdio_cleanup(eth);
-
-       mtk_irq_disable(eth, ~0);
-       free_irq(dev->irq, dev);
-}
-
-static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       if (!mac->phy_dev)
-               return -ENODEV;
-
-       switch (cmd) {
-       case SIOCGMIIPHY:
-       case SIOCGMIIREG:
-       case SIOCSMIIREG:
-               return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
-       default:
-               break;
-       }
-
-       return -EOPNOTSUPP;
-}
-
-static int mtk_change_mtu(struct net_device *dev, int new_mtu)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       int frag_size, old_mtu;
-       u32 fwd_cfg;
-
-       if (!eth->soc->jumbo_frame)
-               return eth_change_mtu(dev, new_mtu);
-
-       frag_size = mtk_max_frag_size(new_mtu);
-       if (new_mtu < 68 || frag_size > PAGE_SIZE)
-               return -EINVAL;
-
-       old_mtu = dev->mtu;
-       dev->mtu = new_mtu;
-
-       /* return early if the buffer sizes will not change */
-       if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
-               return 0;
-       if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN)
-               return 0;
-
-       if (new_mtu <= ETH_DATA_LEN)
-               eth->rx_ring[0].frag_size = mtk_max_frag_size(ETH_DATA_LEN);
-       else
-               eth->rx_ring[0].frag_size = PAGE_SIZE;
-       eth->rx_ring[0].rx_buf_size =
-                               mtk_max_buf_size(eth->rx_ring[0].frag_size);
-
-       if (!netif_running(dev))
-               return 0;
-
-       mtk_stop(dev);
-       fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
-       if (new_mtu <= ETH_DATA_LEN) {
-               fwd_cfg &= ~MTK_GDM1_JMB_EN;
-       } else {
-               fwd_cfg &= ~(MTK_GDM1_JMB_LEN_MASK << MTK_GDM1_JMB_LEN_SHIFT);
-               fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) <<
-                               MTK_GDM1_JMB_LEN_SHIFT) | MTK_GDM1_JMB_EN;
-       }
-       mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
-
-       return mtk_open(dev);
-}
-
-static void mtk_pending_work(struct work_struct *work)
-{
-       struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
-       struct mtk_eth *eth = mac->hw;
-       struct net_device *dev = eth->netdev[mac->id];
-       int err;
-
-       rtnl_lock();
-       mtk_stop(dev);
-
-       err = mtk_open(dev);
-       if (err) {
-               netif_alert(eth, ifup, dev,
-                           "Driver up/down cycle failed, closing device.\n");
-               dev_close(dev);
-       }
-       rtnl_unlock();
-}
-
-static int mtk_cleanup(struct mtk_eth *eth)
-{
-       int i;
-
-       for (i = 0; i < eth->soc->mac_count; i++) {
-               struct mtk_mac *mac;
-
-               if (!eth->netdev[i])
-                       continue;
-
-               mac = netdev_priv(eth->netdev[i]);
-
-               unregister_netdev(eth->netdev[i]);
-               free_netdev(eth->netdev[i]);
-               cancel_work_sync(&mac->pending_work);
-       }
-
-       return 0;
-}
-
-static const struct net_device_ops mtk_netdev_ops = {
-       .ndo_init               = mtk_init,
-       .ndo_uninit             = mtk_uninit,
-       .ndo_open               = mtk_open,
-       .ndo_stop               = mtk_stop,
-       .ndo_start_xmit         = mtk_start_xmit,
-       .ndo_set_mac_address    = mtk_set_mac_address,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_do_ioctl           = mtk_do_ioctl,
-       .ndo_change_mtu         = mtk_change_mtu,
-       .ndo_tx_timeout         = mtk_tx_timeout,
-       .ndo_get_stats64        = mtk_get_stats64,
-       .ndo_vlan_rx_add_vid    = mtk_vlan_rx_add_vid,
-       .ndo_vlan_rx_kill_vid   = mtk_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = mtk_poll_controller,
-#endif
-};
-
-static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
-{
-       struct mtk_mac *mac;
-       const __be32 *_id = of_get_property(np, "reg", NULL);
-       int id, err;
-
-       if (!_id) {
-               dev_err(eth->dev, "missing mac id\n");
-               return -EINVAL;
-       }
-       id = be32_to_cpup(_id);
-       if (id >= eth->soc->mac_count || eth->netdev[id]) {
-               dev_err(eth->dev, "%d is not a valid mac id\n", id);
-               return -EINVAL;
-       }
-
-       eth->netdev[id] = alloc_etherdev(sizeof(*mac));
-       if (!eth->netdev[id]) {
-               dev_err(eth->dev, "alloc_etherdev failed\n");
-               return -ENOMEM;
-       }
-       mac = netdev_priv(eth->netdev[id]);
-       eth->mac[id] = mac;
-       mac->id = id;
-       mac->hw = eth;
-       mac->of_node = np;
-       INIT_WORK(&mac->pending_work, mtk_pending_work);
-
-       if (mtk_reg_table[MTK_REG_MTK_COUNTER_BASE]) {
-               mac->hw_stats = devm_kzalloc(eth->dev,
-                                            sizeof(*mac->hw_stats),
-                                            GFP_KERNEL);
-               if (!mac->hw_stats) {
-                       err = -ENOMEM;
-                       goto free_netdev;
-               }
-               spin_lock_init(&mac->hw_stats->stats_lock);
-               mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
-       }
-
-       SET_NETDEV_DEV(eth->netdev[id], eth->dev);
-       eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
-       eth->netdev[id]->base_addr = (unsigned long)eth->base;
-
-       if (eth->soc->init_data)
-               eth->soc->init_data(eth->soc, eth->netdev[id]);
-
-       eth->netdev[id]->vlan_features = eth->soc->hw_features &
-               ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
-       eth->netdev[id]->features |= eth->soc->hw_features;
-
-       if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
-               eth->netdev[id]->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-
-       mtk_set_ethtool_ops(eth->netdev[id]);
-
-       err = register_netdev(eth->netdev[id]);
-       if (err) {
-               dev_err(eth->dev, "failed to register netdev\n");
-               goto free_netdev;
-       }
-       eth->netdev[id]->irq = eth->irq;
-       netif_info(eth, probe, eth->netdev[id],
-                  "mediatek frame engine at 0x%08lx, irq %d\n",
-                  eth->netdev[id]->base_addr, eth->netdev[id]->irq);
-
-       return 0;
-
-free_netdev:
-       free_netdev(eth->netdev[id]);
-       return err;
-}
-
-static int mtk_probe(struct platform_device *pdev)
-{
-       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       const struct of_device_id *match;
-       struct device_node *mac_np;
-       struct mtk_soc_data *soc;
-       struct mtk_eth *eth;
-       struct clk *sysclk;
-       int err;
-
-       device_reset(&pdev->dev);
-
-       match = of_match_device(of_mtk_match, &pdev->dev);
-       soc = (struct mtk_soc_data *)match->data;
-
-       if (soc->reg_table)
-               mtk_reg_table = soc->reg_table;
-
-       eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
-       if (!eth)
-               return -ENOMEM;
-
-       eth->base = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(eth->base))
-               return PTR_ERR(eth->base);
-
-       spin_lock_init(&eth->page_lock);
-
-       eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
-                                                     "mediatek,ethsys");
-       if (IS_ERR(eth->ethsys))
-               return PTR_ERR(eth->ethsys);
-
-       eth->irq = platform_get_irq(pdev, 0);
-       if (eth->irq < 0) {
-               dev_err(&pdev->dev, "no IRQ resource found\n");
-               return -ENXIO;
-       }
-
-       sysclk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(sysclk)) {
-               dev_err(&pdev->dev,
-                       "the clock is not defined in the devicetree\n");
-               return -ENXIO;
-       }
-       eth->sysclk = clk_get_rate(sysclk);
-
-       eth->switch_np = of_parse_phandle(pdev->dev.of_node,
-                                         "mediatek,switch", 0);
-       if (soc->has_switch && !eth->switch_np) {
-               dev_err(&pdev->dev, "failed to read switch phandle\n");
-               return -ENODEV;
-       }
-
-       eth->dev = &pdev->dev;
-       eth->soc = soc;
-       eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
-
-       err = mtk_init_hw(eth);
-       if (err)
-               return err;
-
-       if (eth->soc->mac_count > 1) {
-               for_each_child_of_node(pdev->dev.of_node, mac_np) {
-                       if (!of_device_is_compatible(mac_np,
-                                                    "mediatek,eth-mac"))
-                               continue;
-
-                       if (!of_device_is_available(mac_np))
-                               continue;
-
-                       err = mtk_add_mac(eth, mac_np);
-                       if (err)
-                               goto err_free_dev;
-               }
-
-               init_dummy_netdev(&eth->dummy_dev);
-               netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
-                              soc->napi_weight);
-       } else {
-               err = mtk_add_mac(eth, pdev->dev.of_node);
-               if (err)
-                       goto err_free_dev;
-               netif_napi_add(eth->netdev[0], &eth->rx_napi, mtk_poll,
-                              soc->napi_weight);
-       }
-
-       platform_set_drvdata(pdev, eth);
-
-       return 0;
-
-err_free_dev:
-       mtk_cleanup(eth);
-       return err;
-}
-
-static int mtk_remove(struct platform_device *pdev)
-{
-       struct mtk_eth *eth = platform_get_drvdata(pdev);
-
-       netif_napi_del(&eth->rx_napi);
-       mtk_cleanup(eth);
-       platform_set_drvdata(pdev, NULL);
-
-       return 0;
-}
-
-static struct platform_driver mtk_driver = {
-       .probe = mtk_probe,
-       .remove = mtk_remove,
-       .driver = {
-               .name = "mtk_soc_eth",
-               .of_match_table = of_mtk_match,
-       },
-};
-
-module_platform_driver(mtk_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
-MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
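Editorial aside: the jumbo-frame path in the deleted mtk_change_mtu() above boils down to one register update. A minimal sketch of that packing, assuming the driver's MTK_GDM1_* constants and a frag_size as returned by mtk_max_frag_size(); the helper name jumbo_fwd_cfg is hypothetical:

static u32 jumbo_fwd_cfg(u32 fwd_cfg, int frag_size)
{
        /* clear the old length field (bits 31:28) ... */
        fwd_cfg &= ~(MTK_GDM1_JMB_LEN_MASK << MTK_GDM1_JMB_LEN_SHIFT);
        /* ... then store the frame length in 1 KiB units and enable jumbo */
        fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) << MTK_GDM1_JMB_LEN_SHIFT) |
                   MTK_GDM1_JMB_EN;
        return fwd_cfg;
}

For a 4 KiB frag_size this stores 4 in the length field, i.e. frames up to 4 KiB pass GDMA1.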
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.h b/drivers/staging/mt7621-eth/mtk_eth_soc.h
deleted file mode 100644 (file)
index e6ed804..0000000
+++ /dev/null
@@ -1,716 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef MTK_ETH_H
-#define MTK_ETH_H
-
-#include <linux/mii.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/dma-mapping.h>
-#include <linux/phy.h>
-#include <linux/ethtool.h>
-#include <linux/version.h>
-#include <linux/atomic.h>
-
-/* These registers have different offsets depending on the SoC, so we use a
- * lookup table for them
- */
-enum mtk_reg {
-       MTK_REG_PDMA_GLO_CFG = 0,
-       MTK_REG_PDMA_RST_CFG,
-       MTK_REG_DLY_INT_CFG,
-       MTK_REG_TX_BASE_PTR0,
-       MTK_REG_TX_MAX_CNT0,
-       MTK_REG_TX_CTX_IDX0,
-       MTK_REG_TX_DTX_IDX0,
-       MTK_REG_RX_BASE_PTR0,
-       MTK_REG_RX_MAX_CNT0,
-       MTK_REG_RX_CALC_IDX0,
-       MTK_REG_RX_DRX_IDX0,
-       MTK_REG_MTK_INT_ENABLE,
-       MTK_REG_MTK_INT_STATUS,
-       MTK_REG_MTK_DMA_VID_BASE,
-       MTK_REG_MTK_COUNTER_BASE,
-       MTK_REG_MTK_RST_GL,
-       MTK_REG_MTK_INT_STATUS2,
-       MTK_REG_COUNT
-};
-
-/* delayed interrupt bits */
-#define MTK_DELAY_EN_INT       0x80
-#define MTK_DELAY_MAX_INT      0x04
-#define MTK_DELAY_MAX_TOUT     0x04
-#define MTK_DELAY_TIME         20
-#define MTK_DELAY_CHAN         (((MTK_DELAY_EN_INT | MTK_DELAY_MAX_INT) << 8) \
-                                | MTK_DELAY_MAX_TOUT)
-#define MTK_DELAY_INIT         ((MTK_DELAY_CHAN << 16) | MTK_DELAY_CHAN)
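/* Editorial worked example (not part of the source): MTK_DELAY_CHAN packs the
 * enable flag and max-interrupt count into the high byte and the timeout into
 * the low byte:
 *     ((0x80 | 0x04) << 8) | 0x04 = 0x8404
 * and MTK_DELAY_INIT replicates that per-channel value into both halves of
 * the 32-bit delay-interrupt register:
 *     (0x8404 << 16) | 0x8404 = 0x84048404
 */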
-#define MTK_PSE_FQFC_CFG_INIT  0x80504000
-#define MTK_PSE_FQFC_CFG_256Q  0xff908000
-
-/* interrupt bits */
-#define MTK_CNT_PPE_AF         BIT(31)
-#define MTK_CNT_GDM_AF         BIT(29)
-#define MTK_PSE_P2_FC          BIT(26)
-#define MTK_PSE_BUF_DROP       BIT(24)
-#define MTK_GDM_OTHER_DROP     BIT(23)
-#define MTK_PSE_P1_FC          BIT(22)
-#define MTK_PSE_P0_FC          BIT(21)
-#define MTK_PSE_FQ_EMPTY       BIT(20)
-#define MTK_GE1_STA_CHG                BIT(18)
-#define MTK_TX_COHERENT                BIT(17)
-#define MTK_RX_COHERENT                BIT(16)
-#define MTK_TX_DONE_INT3       BIT(11)
-#define MTK_TX_DONE_INT2       BIT(10)
-#define MTK_TX_DONE_INT1       BIT(9)
-#define MTK_TX_DONE_INT0       BIT(8)
-#define MTK_RX_DONE_INT0       BIT(2)
-#define MTK_TX_DLY_INT         BIT(1)
-#define MTK_RX_DLY_INT         BIT(0)
-
-#define MTK_RX_DONE_INT                MTK_RX_DONE_INT0
-#define MTK_TX_DONE_INT                (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
-                                MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
-
-#define RT5350_RX_DLY_INT      BIT(30)
-#define RT5350_TX_DLY_INT      BIT(28)
-#define RT5350_RX_DONE_INT1    BIT(17)
-#define RT5350_RX_DONE_INT0    BIT(16)
-#define RT5350_TX_DONE_INT3    BIT(3)
-#define RT5350_TX_DONE_INT2    BIT(2)
-#define RT5350_TX_DONE_INT1    BIT(1)
-#define RT5350_TX_DONE_INT0    BIT(0)
-
-#define RT5350_RX_DONE_INT     (RT5350_RX_DONE_INT0 | RT5350_RX_DONE_INT1)
-#define RT5350_TX_DONE_INT     (RT5350_TX_DONE_INT0 | RT5350_TX_DONE_INT1 | \
-                                RT5350_TX_DONE_INT2 | RT5350_TX_DONE_INT3)
-
-/* registers */
-#define MTK_GDMA_OFFSET                0x0020
-#define MTK_PSE_OFFSET         0x0040
-#define MTK_GDMA2_OFFSET       0x0060
-#define MTK_CDMA_OFFSET                0x0080
-#define MTK_DMA_VID0           0x00a8
-#define MTK_PDMA_OFFSET                0x0100
-#define MTK_PPE_OFFSET         0x0200
-#define MTK_CMTABLE_OFFSET     0x0400
-#define MTK_POLICYTABLE_OFFSET 0x1000
-
-#define MT7621_GDMA_OFFSET     0x0500
-#define MT7620_GDMA_OFFSET     0x0600
-
-#define RT5350_PDMA_OFFSET     0x0800
-#define RT5350_SDM_OFFSET      0x0c00
-
-#define MTK_MDIO_ACCESS                0x00
-#define MTK_MDIO_CFG           0x04
-#define MTK_GLO_CFG            0x08
-#define MTK_RST_GL             0x0C
-#define MTK_INT_STATUS         0x10
-#define MTK_INT_ENABLE         0x14
-#define MTK_MDIO_CFG2          0x18
-#define MTK_FOC_TS_T           0x1C
-
-#define        MTK_GDMA1_FWD_CFG       (MTK_GDMA_OFFSET + 0x00)
-#define MTK_GDMA1_SCH_CFG      (MTK_GDMA_OFFSET + 0x04)
-#define MTK_GDMA1_SHPR_CFG     (MTK_GDMA_OFFSET + 0x08)
-#define MTK_GDMA1_MAC_ADRL     (MTK_GDMA_OFFSET + 0x0C)
-#define MTK_GDMA1_MAC_ADRH     (MTK_GDMA_OFFSET + 0x10)
-
-#define        MTK_GDMA2_FWD_CFG       (MTK_GDMA2_OFFSET + 0x00)
-#define MTK_GDMA2_SCH_CFG      (MTK_GDMA2_OFFSET + 0x04)
-#define MTK_GDMA2_SHPR_CFG     (MTK_GDMA2_OFFSET + 0x08)
-#define MTK_GDMA2_MAC_ADRL     (MTK_GDMA2_OFFSET + 0x0C)
-#define MTK_GDMA2_MAC_ADRH     (MTK_GDMA2_OFFSET + 0x10)
-
-#define MTK_PSE_FQ_CFG         (MTK_PSE_OFFSET + 0x00)
-#define MTK_CDMA_FC_CFG                (MTK_PSE_OFFSET + 0x04)
-#define MTK_GDMA1_FC_CFG       (MTK_PSE_OFFSET + 0x08)
-#define MTK_GDMA2_FC_CFG       (MTK_PSE_OFFSET + 0x0C)
-
-#define MTK_CDMA_CSG_CFG       (MTK_CDMA_OFFSET + 0x00)
-#define MTK_CDMA_SCH_CFG       (MTK_CDMA_OFFSET + 0x04)
-
-#define        MT7621_GDMA_FWD_CFG(x)  (MT7621_GDMA_OFFSET + (x * 0x1000))
-
-/* FIXME this might be different for different SOCs */
-#define        MT7620_GDMA1_FWD_CFG    (MT7621_GDMA_OFFSET + 0x00)
-
-#define RT5350_TX_BASE_PTR0    (RT5350_PDMA_OFFSET + 0x00)
-#define RT5350_TX_MAX_CNT0     (RT5350_PDMA_OFFSET + 0x04)
-#define RT5350_TX_CTX_IDX0     (RT5350_PDMA_OFFSET + 0x08)
-#define RT5350_TX_DTX_IDX0     (RT5350_PDMA_OFFSET + 0x0C)
-#define RT5350_TX_BASE_PTR1    (RT5350_PDMA_OFFSET + 0x10)
-#define RT5350_TX_MAX_CNT1     (RT5350_PDMA_OFFSET + 0x14)
-#define RT5350_TX_CTX_IDX1     (RT5350_PDMA_OFFSET + 0x18)
-#define RT5350_TX_DTX_IDX1     (RT5350_PDMA_OFFSET + 0x1C)
-#define RT5350_TX_BASE_PTR2    (RT5350_PDMA_OFFSET + 0x20)
-#define RT5350_TX_MAX_CNT2     (RT5350_PDMA_OFFSET + 0x24)
-#define RT5350_TX_CTX_IDX2     (RT5350_PDMA_OFFSET + 0x28)
-#define RT5350_TX_DTX_IDX2     (RT5350_PDMA_OFFSET + 0x2C)
-#define RT5350_TX_BASE_PTR3    (RT5350_PDMA_OFFSET + 0x30)
-#define RT5350_TX_MAX_CNT3     (RT5350_PDMA_OFFSET + 0x34)
-#define RT5350_TX_CTX_IDX3     (RT5350_PDMA_OFFSET + 0x38)
-#define RT5350_TX_DTX_IDX3     (RT5350_PDMA_OFFSET + 0x3C)
-#define RT5350_RX_BASE_PTR0    (RT5350_PDMA_OFFSET + 0x100)
-#define RT5350_RX_MAX_CNT0     (RT5350_PDMA_OFFSET + 0x104)
-#define RT5350_RX_CALC_IDX0    (RT5350_PDMA_OFFSET + 0x108)
-#define RT5350_RX_DRX_IDX0     (RT5350_PDMA_OFFSET + 0x10C)
-#define RT5350_RX_BASE_PTR1    (RT5350_PDMA_OFFSET + 0x110)
-#define RT5350_RX_MAX_CNT1     (RT5350_PDMA_OFFSET + 0x114)
-#define RT5350_RX_CALC_IDX1    (RT5350_PDMA_OFFSET + 0x118)
-#define RT5350_RX_DRX_IDX1     (RT5350_PDMA_OFFSET + 0x11C)
-#define RT5350_PDMA_GLO_CFG    (RT5350_PDMA_OFFSET + 0x204)
-#define RT5350_PDMA_RST_CFG    (RT5350_PDMA_OFFSET + 0x208)
-#define RT5350_DLY_INT_CFG     (RT5350_PDMA_OFFSET + 0x20c)
-#define RT5350_MTK_INT_STATUS  (RT5350_PDMA_OFFSET + 0x220)
-#define RT5350_MTK_INT_ENABLE  (RT5350_PDMA_OFFSET + 0x228)
-#define RT5350_PDMA_SCH_CFG    (RT5350_PDMA_OFFSET + 0x280)
-
-#define MTK_PDMA_GLO_CFG       (MTK_PDMA_OFFSET + 0x00)
-#define MTK_PDMA_RST_CFG       (MTK_PDMA_OFFSET + 0x04)
-#define MTK_PDMA_SCH_CFG       (MTK_PDMA_OFFSET + 0x08)
-#define MTK_DLY_INT_CFG                (MTK_PDMA_OFFSET + 0x0C)
-#define MTK_TX_BASE_PTR0       (MTK_PDMA_OFFSET + 0x10)
-#define MTK_TX_MAX_CNT0                (MTK_PDMA_OFFSET + 0x14)
-#define MTK_TX_CTX_IDX0                (MTK_PDMA_OFFSET + 0x18)
-#define MTK_TX_DTX_IDX0                (MTK_PDMA_OFFSET + 0x1C)
-#define MTK_TX_BASE_PTR1       (MTK_PDMA_OFFSET + 0x20)
-#define MTK_TX_MAX_CNT1                (MTK_PDMA_OFFSET + 0x24)
-#define MTK_TX_CTX_IDX1                (MTK_PDMA_OFFSET + 0x28)
-#define MTK_TX_DTX_IDX1                (MTK_PDMA_OFFSET + 0x2C)
-#define MTK_RX_BASE_PTR0       (MTK_PDMA_OFFSET + 0x30)
-#define MTK_RX_MAX_CNT0                (MTK_PDMA_OFFSET + 0x34)
-#define MTK_RX_CALC_IDX0       (MTK_PDMA_OFFSET + 0x38)
-#define MTK_RX_DRX_IDX0                (MTK_PDMA_OFFSET + 0x3C)
-#define MTK_TX_BASE_PTR2       (MTK_PDMA_OFFSET + 0x40)
-#define MTK_TX_MAX_CNT2                (MTK_PDMA_OFFSET + 0x44)
-#define MTK_TX_CTX_IDX2                (MTK_PDMA_OFFSET + 0x48)
-#define MTK_TX_DTX_IDX2                (MTK_PDMA_OFFSET + 0x4C)
-#define MTK_TX_BASE_PTR3       (MTK_PDMA_OFFSET + 0x50)
-#define MTK_TX_MAX_CNT3                (MTK_PDMA_OFFSET + 0x54)
-#define MTK_TX_CTX_IDX3                (MTK_PDMA_OFFSET + 0x58)
-#define MTK_TX_DTX_IDX3                (MTK_PDMA_OFFSET + 0x5C)
-#define MTK_RX_BASE_PTR1       (MTK_PDMA_OFFSET + 0x60)
-#define MTK_RX_MAX_CNT1                (MTK_PDMA_OFFSET + 0x64)
-#define MTK_RX_CALC_IDX1       (MTK_PDMA_OFFSET + 0x68)
-#define MTK_RX_DRX_IDX1                (MTK_PDMA_OFFSET + 0x6C)
-
-/* Switch DMA configuration */
-#define RT5350_SDM_CFG         (RT5350_SDM_OFFSET + 0x00)
-#define RT5350_SDM_RRING       (RT5350_SDM_OFFSET + 0x04)
-#define RT5350_SDM_TRING       (RT5350_SDM_OFFSET + 0x08)
-#define RT5350_SDM_MAC_ADRL    (RT5350_SDM_OFFSET + 0x0C)
-#define RT5350_SDM_MAC_ADRH    (RT5350_SDM_OFFSET + 0x10)
-#define RT5350_SDM_TPCNT       (RT5350_SDM_OFFSET + 0x100)
-#define RT5350_SDM_TBCNT       (RT5350_SDM_OFFSET + 0x104)
-#define RT5350_SDM_RPCNT       (RT5350_SDM_OFFSET + 0x108)
-#define RT5350_SDM_RBCNT       (RT5350_SDM_OFFSET + 0x10C)
-#define RT5350_SDM_CS_ERR      (RT5350_SDM_OFFSET + 0x110)
-
-#define RT5350_SDM_ICS_EN      BIT(16)
-#define RT5350_SDM_TCS_EN      BIT(17)
-#define RT5350_SDM_UCS_EN      BIT(18)
-
-/* QDMA registers */
-#define MTK_QTX_CFG(x)         (0x1800 + (x * 0x10))
-#define MTK_QTX_SCH(x)         (0x1804 + (x * 0x10))
-#define MTK_QRX_BASE_PTR0      0x1900
-#define MTK_QRX_MAX_CNT0       0x1904
-#define MTK_QRX_CRX_IDX0       0x1908
-#define MTK_QRX_DRX_IDX0       0x190C
-#define MTK_QDMA_GLO_CFG       0x1A04
-#define MTK_QDMA_RST_IDX       0x1A08
-#define MTK_QDMA_DELAY_INT     0x1A0C
-#define MTK_QDMA_FC_THRES      0x1A10
-#define MTK_QMTK_INT_STATUS    0x1A18
-#define MTK_QMTK_INT_ENABLE    0x1A1C
-#define MTK_QDMA_HRED2         0x1A44
-
-#define MTK_QTX_CTX_PTR                0x1B00
-#define MTK_QTX_DTX_PTR                0x1B04
-
-#define MTK_QTX_CRX_PTR                0x1B10
-#define MTK_QTX_DRX_PTR                0x1B14
-
-#define MTK_QDMA_FQ_HEAD       0x1B20
-#define MTK_QDMA_FQ_TAIL       0x1B24
-#define MTK_QDMA_FQ_CNT                0x1B28
-#define MTK_QDMA_FQ_BLEN       0x1B2C
-
-#define QDMA_PAGE_SIZE         2048
-#define QDMA_TX_OWNER_CPU      BIT(31)
-#define QDMA_TX_SWC            BIT(14)
-#define TX_QDMA_SDL(_x)                (((_x) & 0x3fff) << 16)
-#define QDMA_RES_THRES         4
-
-/* MDIO_CFG register bits */
-#define MTK_MDIO_CFG_AUTO_POLL_EN      BIT(29)
-#define MTK_MDIO_CFG_GP1_BP_EN         BIT(16)
-#define MTK_MDIO_CFG_GP1_FRC_EN                BIT(15)
-#define MTK_MDIO_CFG_GP1_SPEED_10      (0 << 13)
-#define MTK_MDIO_CFG_GP1_SPEED_100     (1 << 13)
-#define MTK_MDIO_CFG_GP1_SPEED_1000    (2 << 13)
-#define MTK_MDIO_CFG_GP1_DUPLEX                BIT(12)
-#define MTK_MDIO_CFG_GP1_FC_TX         BIT(11)
-#define MTK_MDIO_CFG_GP1_FC_RX         BIT(10)
-#define MTK_MDIO_CFG_GP1_LNK_DWN       BIT(9)
-#define MTK_MDIO_CFG_GP1_AN_FAIL       BIT(8)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_1     (0 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_2     (1 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_4     (2 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_8     (3 << 6)
-#define MTK_MDIO_CFG_TURBO_MII_FREQ    BIT(5)
-#define MTK_MDIO_CFG_TURBO_MII_MODE    BIT(4)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_0     (0 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_200   (1 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_400   (2 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_INV   (3 << 2)
-#define MTK_MDIO_CFG_TX_CLK_SKEW_0     0
-#define MTK_MDIO_CFG_TX_CLK_SKEW_200   1
-#define MTK_MDIO_CFG_TX_CLK_SKEW_400   2
-#define MTK_MDIO_CFG_TX_CLK_SKEW_INV   3
-
-/* unicast port */
-#define MTK_GDM1_JMB_LEN_MASK  0xf
-#define MTK_GDM1_JMB_LEN_SHIFT 28
-#define MTK_GDM1_ICS_EN                BIT(22)
-#define MTK_GDM1_TCS_EN                BIT(21)
-#define MTK_GDM1_UCS_EN                BIT(20)
-#define MTK_GDM1_JMB_EN                BIT(19)
-#define MTK_GDM1_STRPCRC       BIT(16)
-#define MTK_GDM1_UFRC_P_CPU    (0 << 12)
-#define MTK_GDM1_UFRC_P_GDMA1  (1 << 12)
-#define MTK_GDM1_UFRC_P_PPE    (6 << 12)
-
-/* checksums */
-#define MTK_ICS_GEN_EN         BIT(2)
-#define MTK_UCS_GEN_EN         BIT(1)
-#define MTK_TCS_GEN_EN         BIT(0)
-
-/* dma mode */
-#define MTK_PDMA               BIT(0)
-#define MTK_QDMA               BIT(1)
-#define MTK_PDMA_RX_QDMA_TX    (MTK_PDMA | MTK_QDMA)
-
-/* dma ring */
-#define MTK_PST_DRX_IDX0       BIT(16)
-#define MTK_PST_DTX_IDX3       BIT(3)
-#define MTK_PST_DTX_IDX2       BIT(2)
-#define MTK_PST_DTX_IDX1       BIT(1)
-#define MTK_PST_DTX_IDX0       BIT(0)
-
-#define MTK_RX_2B_OFFSET       BIT(31)
-#define MTK_TX_WB_DDONE                BIT(6)
-#define MTK_RX_DMA_BUSY                BIT(3)
-#define MTK_TX_DMA_BUSY                BIT(1)
-#define MTK_RX_DMA_EN          BIT(2)
-#define MTK_TX_DMA_EN          BIT(0)
-
-#define MTK_PDMA_SIZE_4DWORDS  (0 << 4)
-#define MTK_PDMA_SIZE_8DWORDS  (1 << 4)
-#define MTK_PDMA_SIZE_16DWORDS (2 << 4)
-
-#define MTK_US_CYC_CNT_MASK    0xff
-#define MTK_US_CYC_CNT_SHIFT   0x8
-#define MTK_US_CYC_CNT_DIVISOR 1000000
-
-/* PDMA descriptor rxd2 */
-#define RX_DMA_DONE            BIT(31)
-#define RX_DMA_LSO             BIT(30)
-#define RX_DMA_PLEN0(_x)       (((_x) & 0x3fff) << 16)
-#define RX_DMA_GET_PLEN0(_x)   (((_x) >> 16) & 0x3fff)
-#define RX_DMA_TAG             BIT(15)
-
-/* PDMA descriptor rxd3 */
-#define RX_DMA_TPID(_x)                (((_x) >> 16) & 0xffff)
-#define RX_DMA_VID(_x)         ((_x) & 0xfff)
-
-/* PDMA descriptor rxd4 */
-#define RX_DMA_L4VALID         BIT(30)
-#define RX_DMA_FPORT_SHIFT     19
-#define RX_DMA_FPORT_MASK      0x7
-
-struct mtk_rx_dma {
-       unsigned int rxd1;
-       unsigned int rxd2;
-       unsigned int rxd3;
-       unsigned int rxd4;
-} __packed __aligned(4);
-
-/* PDMA tx descriptor bits */
-#define TX_DMA_BUF_LEN         0x3fff
-#define TX_DMA_PLEN0_MASK      (TX_DMA_BUF_LEN << 16)
-#define TX_DMA_PLEN0(_x)       (((_x) & TX_DMA_BUF_LEN) << 16)
-#define TX_DMA_PLEN1(_x)       ((_x) & TX_DMA_BUF_LEN)
-#define TX_DMA_GET_PLEN0(_x)    (((_x) >> 16) & TX_DMA_BUF_LEN)
-#define TX_DMA_GET_PLEN1(_x)    ((_x) & TX_DMA_BUF_LEN)
-#define TX_DMA_LS1             BIT(14)
-#define TX_DMA_LS0             BIT(30)
-#define TX_DMA_DONE            BIT(31)
-#define TX_DMA_FPORT_SHIFT     25
-#define TX_DMA_FPORT_MASK      0x7
-#define TX_DMA_INS_VLAN_MT7621 BIT(16)
-#define TX_DMA_INS_VLAN                BIT(7)
-#define TX_DMA_INS_PPPOE       BIT(12)
-#define TX_DMA_TAG             BIT(15)
-#define TX_DMA_TAG_MASK                BIT(15)
-#define TX_DMA_QN(_x)          ((_x) << 16)
-#define TX_DMA_PN(_x)          ((_x) << 24)
-#define TX_DMA_QN_MASK         TX_DMA_QN(0x7)
-#define TX_DMA_PN_MASK         TX_DMA_PN(0x7)
-#define TX_DMA_UDF             BIT(20)
-#define TX_DMA_CHKSUM          (0x7 << 29)
-#define TX_DMA_TSO             BIT(28)
-#define TX_DMA_DESP4_DEF       (TX_DMA_QN(3) | TX_DMA_PN(1))
-
-/* frame engine counters */
-#define MTK_PPE_AC_BCNT0       (MTK_CMTABLE_OFFSET + 0x00)
-#define MTK_GDMA1_TX_GBCNT     (MTK_CMTABLE_OFFSET + 0x300)
-#define MTK_GDMA2_TX_GBCNT     (MTK_GDMA1_TX_GBCNT + 0x40)
-
-/* phy device flags */
-#define MTK_PHY_FLAG_PORT      BIT(0)
-#define MTK_PHY_FLAG_ATTACH    BIT(1)
-
-struct mtk_tx_dma {
-       unsigned int txd1;
-       unsigned int txd2;
-       unsigned int txd3;
-       unsigned int txd4;
-} __packed __aligned(4);
-
-struct mtk_eth;
-struct mtk_mac;
-
-/* manage the attached phys */
-struct mtk_phy {
-       spinlock_t              lock;
-
-       struct phy_device       *phy[8];
-       struct device_node      *phy_node[8];
-       const __be32            *phy_fixed[8];
-       int                     duplex[8];
-       int                     speed[8];
-       int                     tx_fc[8];
-       int                     rx_fc[8];
-       int (*connect)(struct mtk_mac *mac);
-       void (*disconnect)(struct mtk_mac *mac);
-       void (*start)(struct mtk_mac *mac);
-       void (*stop)(struct mtk_mac *mac);
-};
-
-/* struct mtk_soc_data - the structure that holds the SoC specific data
- * @reg_table:         Some of the legacy registers changed their location
- *                     over time. Their offsets are stored in this table
- *
- * @init_data:         Some features depend on the silicon revision. This
- *                     callback allows runtime modification of the content of
- *                     this struct
- * @reset_fe:          This callback is used to trigger the reset of the frame
- *                     engine
- * @set_mac:           This callback is used to set the unicast mac address
- *                     filter
- * @fwd_config:                This callback is used to setup the forward config
- *                     register of the MAC
- * @switch_init:       This callback is used to bring up the switch core
- * @port_init:         Some SoCs have ports that can be routed to a switch port
- *                     or an external PHY. This callback is used to setup these
- *                     ports.
- * @has_carrier:       This callback allows the driver to check if there is a
- *                     cable attached.
- * @mdio_init:         This callback is used to setup the MDIO bus if one is
- *                     present
- * @mdio_cleanup:      This callback is used to cleanup the MDIO state.
- * @mdio_write:                This callback is used to write data to the MDIO bus.
- * @mdio_read:         This callback is used to read data from the MDIO bus.
- * @mdio_adjust_link:  This callback is used to apply the PHY settings.
- * @piac_offset:       the PIAC register has a different base offset
- * @hw_features:       feature set depends on the SoC type
- * @dma_ring_size:     allow GBit SoCs to set bigger rings than FE SoCs
- * @napi_weight:       allow GBit SoCs to set bigger napi weight than FE SoCs
- * @dma_type:          whether the SoC uses PDMA, QDMA or a mix of the two
- * @pdma_glo_cfg:      the default DMA configuration
- * @rx_int:            the RX interrupt bits used by the SoC
- * @tx_int:            the TX interrupt bits used by the SoC
- * @status_int:                the Status interrupt bits used by the SoC
- * @checksum_bit:      the bits used to turn on HW checksumming
- * @txd4:              default value of the TXD4 descriptor
- * @mac_count:         the number of MACs that the SoC has
- * @new_stats:         there are old and new ways to read the hardware
- *                     stats registers
- * @jumbo_frame:       does the SoC support jumbo frames?
- * @rx_2b_offset:      tell the rx dma to offset the data by 2 bytes
- * @rx_sg_dma:         scatter gather support
- * @padding_64b:       enable 64-bit padding
- * @padding_bug:       rt2880 has a padding bug
- * @has_switch:                does the SoC have a built-in switch
- *
- * Although all of the supported SoCs share the same basic functionality, there
- * are several SoC specific functions and features that we need to support. This
- * struct holds the SoC specific data so that the common core can figure out
- * how to setup and use these differences.
- */
-struct mtk_soc_data {
-       const u16 *reg_table;
-
-       void (*init_data)(struct mtk_soc_data *data, struct net_device *netdev);
-       void (*reset_fe)(struct mtk_eth *eth);
-       void (*set_mac)(struct mtk_mac *mac, unsigned char *macaddr);
-       int (*fwd_config)(struct mtk_eth *eth);
-       int (*switch_init)(struct mtk_eth *eth);
-       void (*port_init)(struct mtk_eth *eth, struct mtk_mac *mac,
-                         struct device_node *port);
-       int (*has_carrier)(struct mtk_eth *eth);
-       int (*mdio_init)(struct mtk_eth *eth);
-       void (*mdio_cleanup)(struct mtk_eth *eth);
-       int (*mdio_write)(struct mii_bus *bus, int phy_addr, int phy_reg,
-                         u16 val);
-       int (*mdio_read)(struct mii_bus *bus, int phy_addr, int phy_reg);
-       void (*mdio_adjust_link)(struct mtk_eth *eth, int port);
-       u32 piac_offset;
-       netdev_features_t hw_features;
-       u32 dma_ring_size;
-       u32 napi_weight;
-       u32 dma_type;
-       u32 pdma_glo_cfg;
-       u32 rx_int;
-       u32 tx_int;
-       u32 status_int;
-       u32 checksum_bit;
-       u32 txd4;
-       u32 mac_count;
-
-       u32 new_stats:1;
-       u32 jumbo_frame:1;
-       u32 rx_2b_offset:1;
-       u32 rx_sg_dma:1;
-       u32 padding_64b:1;
-       u32 padding_bug:1;
-       u32 has_switch:1;
-};
-
-#define MTK_STAT_OFFSET                        0x40
-
-/* struct mtk_hw_stats - the structure that holds the traffic statistics.
- * @stats_lock:                make sure that stats operations are atomic
- * @reg_offset:                the status register offset of the SoC
- * @syncp:             u64 stats sync point for 64-bit counter reads
- *
- * All of the supported SoCs have hardware counters for traffic statistics.
- * Whenever the status IRQ triggers we can read the latest stats from these
- * counters and store them in this struct.
- */
-struct mtk_hw_stats {
-       spinlock_t stats_lock;
-       u32 reg_offset;
-       struct u64_stats_sync syncp;
-
-       u64 tx_bytes;
-       u64 tx_packets;
-       u64 tx_skip;
-       u64 tx_collisions;
-       u64 rx_bytes;
-       u64 rx_packets;
-       u64 rx_overflow;
-       u64 rx_fcs_errors;
-       u64 rx_short_errors;
-       u64 rx_long_errors;
-       u64 rx_checksum_errors;
-       u64 rx_flow_control_packets;
-};
-
-/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
- * memory was allocated so that it can be freed properly
- */
-enum mtk_tx_flags {
-       MTK_TX_FLAGS_SINGLE0    = 0x01,
-       MTK_TX_FLAGS_PAGE0      = 0x02,
-       MTK_TX_FLAGS_PAGE1      = 0x04,
-};
-
-/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
- *                     by the TX descriptors
- * @skb:               The SKB pointer of the packet being sent
- * @flags:             mtk_tx_flags describing how the buffers were mapped
- * @dma_addr0:         The base addr of the first segment
- * @dma_len0:          The length of the first segment
- * @dma_addr1:         The base addr of the second segment
- * @dma_len1:          The length of the second segment
- */
-struct mtk_tx_buf {
-       struct sk_buff *skb;
-       u32 flags;
-       DEFINE_DMA_UNMAP_ADDR(dma_addr0);
-       DEFINE_DMA_UNMAP_LEN(dma_len0);
-       DEFINE_DMA_UNMAP_ADDR(dma_addr1);
-       DEFINE_DMA_UNMAP_LEN(dma_len1);
-};
-
-/* struct mtk_tx_ring -        This struct holds info describing a TX ring
- * @tx_dma:            The descriptor ring
- * @tx_buf:            The memory pointed at by the ring
- * @tx_phys:           The physical addr of tx_buf
- * @tx_next_free:      Pointer to the next free descriptor
- * @tx_last_free:      Pointer to the last free descriptor
- * @tx_thresh:         The minimum number of free descriptors required
- * @tx_map:            Callback to map a new packet into the ring
- * @tx_poll:           Callback for the housekeeping function
- * @tx_clean:          Callback for the cleanup function
- * @tx_ring_size:      How many descriptors are in the ring
- * @tx_free_idx:       The index of the next free descriptor
- * @tx_next_idx:       QDMA uses a linked list. This element points to the next
- *                     free descriptor in the list
- * @tx_free_count:     QDMA uses a linked list. Track how many free descriptors
- *                     are present
- */
-struct mtk_tx_ring {
-       struct mtk_tx_dma *tx_dma;
-       struct mtk_tx_buf *tx_buf;
-       dma_addr_t tx_phys;
-       struct mtk_tx_dma *tx_next_free;
-       struct mtk_tx_dma *tx_last_free;
-       u16 tx_thresh;
-       int (*tx_map)(struct sk_buff *skb, struct net_device *dev, int tx_num,
-                     struct mtk_tx_ring *ring, bool gso);
-       int (*tx_poll)(struct mtk_eth *eth, int budget, bool *tx_again);
-       void (*tx_clean)(struct mtk_eth *eth);
-
-       /* PDMA only */
-       u16 tx_ring_size;
-       u16 tx_free_idx;
-
-       /* QDMA only */
-       u16 tx_next_idx;
-       atomic_t tx_free_count;
-};
-
-/* struct mtk_rx_ring -        This struct holds info describing a RX ring
- * @rx_dma:            The descriptor ring
- * @rx_data:           The memory pointed at by the ring
- * @rx_phys:           The physical address of the descriptor ring
- * @rx_ring_size:      How many descriptors are in the ring
- * @frag_size:         The page fragment size of each rx buffer
- * @rx_buf_size:       The size of each packet buffer
- * @rx_calc_idx:       The current head of ring
- */
-struct mtk_rx_ring {
-       struct mtk_rx_dma *rx_dma;
-       u8 **rx_data;
-       dma_addr_t rx_phys;
-       u16 rx_ring_size;
-       u16 frag_size;
-       u16 rx_buf_size;
-       u16 rx_calc_idx;
-};
-
-/* currently no SoC has more than 2 MACs */
-#define MTK_MAX_DEVS                   2
-
-/* struct mtk_eth -    This is the main data structure for holding the state
- *                     of the driver
- * @dev:               The device pointer
- * @base:              The mapped register i/o base
- * @page_lock:         Make sure that register operations are atomic
- * @soc:               pointer to our SoC specific data
- * @dummy_dev:         we run 2 netdevs on 1 physical DMA ring and need a
- *                     dummy for NAPI to work
- * @netdev:            The netdev instances
- * @mac:               Each netdev is linked to a physical MAC
- * @switch_np:         The phandle for the switch
- * @irq:               The IRQ that we are using
- * @msg_enable:                Ethtool msg level
- * @sysclk:            The sysclk rate - needed for calibration
- * @ethsys:            The register map pointing at the range used to setup
- *                     MII modes
- * @dma_refcnt:                track how many netdevs are using the DMA engine
- * @tx_ring:           Pointer to the memory holding info about the TX ring
- * @rx_ring:           Pointer to the memory holding info about the RX ring
- * @rx_napi:           The NAPI struct
- * @scratch_ring:      Newer SoCs need memory for a second HW managed TX ring
- * @scratch_head:      The scratch memory that scratch_ring points to.
- * @phy:               Info about the attached PHYs
- * @mii_bus:           If there is a bus we need to create an instance for it
- * @link:              Track if the ports have a physical link
- * @sw_priv:           Pointer to the switch's private data
- * @vlan_map:          RX VID tracking
- */
-
-struct mtk_eth {
-       struct device                   *dev;
-       void __iomem                    *base;
-       spinlock_t                      page_lock;
-       struct mtk_soc_data             *soc;
-       struct net_device               dummy_dev;
-       struct net_device               *netdev[MTK_MAX_DEVS];
-       struct mtk_mac                  *mac[MTK_MAX_DEVS];
-       struct device_node              *switch_np;
-       int                             irq;
-       u32                             msg_enable;
-       unsigned long                   sysclk;
-       struct regmap                   *ethsys;
-       atomic_t                        dma_refcnt;
-       struct mtk_tx_ring              tx_ring;
-       struct mtk_rx_ring              rx_ring[2];
-       struct napi_struct              rx_napi;
-       struct mtk_tx_dma               *scratch_ring;
-       void                            *scratch_head;
-       struct mtk_phy                  *phy;
-       struct mii_bus                  *mii_bus;
-       int                             link[8];
-       void                            *sw_priv;
-       unsigned long                   vlan_map;
-};
-
-/* struct mtk_mac -    the structure that holds the info about the MACs of the
- *                     SoC
- * @id:                        The number of the MAC
- * @of_node:           Our devicetree node
- * @hw:                        Backpointer to our main data structure
- * @hw_stats:          Packet statistics counter
- * @phy_dev:           The attached PHY if available
- * @phy_flags:         The PHYs flags
- * @pending_work:      The work used to reset the DMA ring
- */
-struct mtk_mac {
-       int                             id;
-       struct device_node              *of_node;
-       struct mtk_eth                  *hw;
-       struct mtk_hw_stats             *hw_stats;
-       struct phy_device               *phy_dev;
-       u32                             phy_flags;
-       struct work_struct              pending_work;
-};
-
-/* The structs describing the SoCs are declared in the soc_xyz.c files */
-extern const struct of_device_id of_mtk_match[];
-
-/* read the hardware status register */
-void mtk_stats_update_mac(struct mtk_mac *mac);
-
-/* frame engine reset handler */
-void mtk_reset(struct mtk_eth *eth, u32 reset_bits);
-
-/* register i/o wrappers */
-void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg);
-u32 mtk_r32(struct mtk_eth *eth, unsigned int reg);
-
-/* default clock calibration handler */
-int mtk_set_clock_cycle(struct mtk_eth *eth);
-
-/* default checksum setup handler */
-void mtk_csum_config(struct mtk_eth *eth);
-
-/* default forward config handler */
-void mtk_fwd_config(struct mtk_eth *eth);
-
-#endif /* MTK_ETH_H */
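Editorial note: the PLEN0 macros above are symmetric pack/unpack helpers over a 14-bit length field in bits 29:16 of the second descriptor word. A self-contained sketch (demo-only, the helper name demo_plen0 is hypothetical):

static void demo_plen0(struct mtk_tx_dma *txd, unsigned int len)
{
        /* pack: the low 14 bits of len land in txd2[29:16] */
        txd->txd2 = TX_DMA_PLEN0(len);

        /* unpack: for len <= TX_DMA_BUF_LEN this round-trips exactly */
        WARN_ON(TX_DMA_GET_PLEN0(txd->txd2) != (len & TX_DMA_BUF_LEN));
}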
diff --git a/drivers/staging/mt7621-eth/soc_mt7621.c b/drivers/staging/mt7621-eth/soc_mt7621.c
deleted file mode 100644 (file)
index 5d63b5d..0000000
+++ /dev/null
@@ -1,161 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/if_vlan.h>
-#include <linux/of_net.h>
-
-#include <asm/mach-ralink/ralink_regs.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-#include "mdio.h"
-
-#define MT7620_CDMA_CSG_CFG    0x400
-#define MT7621_CDMP_IG_CTRL    (MT7620_CDMA_CSG_CFG + 0x00)
-#define MT7621_CDMP_EG_CTRL    (MT7620_CDMA_CSG_CFG + 0x04)
-#define MT7621_RESET_FE                BIT(6)
-#define MT7621_L4_VALID                BIT(24)
-
-#define MT7621_TX_DMA_UDF      BIT(19)
-
-#define CDMA_ICS_EN            BIT(2)
-#define CDMA_UCS_EN            BIT(1)
-#define CDMA_TCS_EN            BIT(0)
-
-#define GDMA_ICS_EN            BIT(22)
-#define GDMA_TCS_EN            BIT(21)
-#define GDMA_UCS_EN            BIT(20)
-
-/* frame engine counters */
-#define MT7621_REG_MIB_OFFSET  0x2000
-#define MT7621_PPE_AC_BCNT0    (MT7621_REG_MIB_OFFSET + 0x00)
-#define MT7621_GDM1_TX_GBCNT   (MT7621_REG_MIB_OFFSET + 0x400)
-#define MT7621_GDM2_TX_GBCNT   (MT7621_GDM1_TX_GBCNT + 0x40)
-
-#define GSW_REG_GDMA1_MAC_ADRL 0x508
-#define GSW_REG_GDMA1_MAC_ADRH 0x50C
-#define GSW_REG_GDMA2_MAC_ADRL 0x1508
-#define GSW_REG_GDMA2_MAC_ADRH 0x150C
-
-#define MT7621_MTK_RST_GL      0x04
-#define MT7620_MTK_INT_STATUS2 0x08
-
-/* The MTK_INT_STATUS reg on mt7620 defines CNT_GDM1_AF at BIT(29),
- * but testing shows it should be BIT(13).
- */
-#define MT7621_MTK_GDM1_AF     BIT(28)
-#define MT7621_MTK_GDM2_AF     BIT(29)
-
-static const u16 mt7621_reg_table[MTK_REG_COUNT] = {
-       [MTK_REG_PDMA_GLO_CFG] = RT5350_PDMA_GLO_CFG,
-       [MTK_REG_PDMA_RST_CFG] = RT5350_PDMA_RST_CFG,
-       [MTK_REG_DLY_INT_CFG] = RT5350_DLY_INT_CFG,
-       [MTK_REG_TX_BASE_PTR0] = RT5350_TX_BASE_PTR0,
-       [MTK_REG_TX_MAX_CNT0] = RT5350_TX_MAX_CNT0,
-       [MTK_REG_TX_CTX_IDX0] = RT5350_TX_CTX_IDX0,
-       [MTK_REG_TX_DTX_IDX0] = RT5350_TX_DTX_IDX0,
-       [MTK_REG_RX_BASE_PTR0] = RT5350_RX_BASE_PTR0,
-       [MTK_REG_RX_MAX_CNT0] = RT5350_RX_MAX_CNT0,
-       [MTK_REG_RX_CALC_IDX0] = RT5350_RX_CALC_IDX0,
-       [MTK_REG_RX_DRX_IDX0] = RT5350_RX_DRX_IDX0,
-       [MTK_REG_MTK_INT_ENABLE] = RT5350_MTK_INT_ENABLE,
-       [MTK_REG_MTK_INT_STATUS] = RT5350_MTK_INT_STATUS,
-       [MTK_REG_MTK_DMA_VID_BASE] = 0,
-       [MTK_REG_MTK_COUNTER_BASE] = MT7621_GDM1_TX_GBCNT,
-       [MTK_REG_MTK_RST_GL] = MT7621_MTK_RST_GL,
-       [MTK_REG_MTK_INT_STATUS2] = MT7620_MTK_INT_STATUS2,
-};
-
-static void mt7621_mtk_reset(struct mtk_eth *eth)
-{
-       mtk_reset(eth, MT7621_RESET_FE);
-}
-
-static int mt7621_fwd_config(struct mtk_eth *eth)
-{
-       /* Set up GMAC1 only; there is no support for GMAC2 yet */
-       mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) & ~0xffff,
-               MT7620_GDMA1_FWD_CFG);
-
-       /* Enable RX checksum */
-       mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) | (GDMA_ICS_EN |
-                      GDMA_TCS_EN | GDMA_UCS_EN),
-                      MT7620_GDMA1_FWD_CFG);
-
-       /* Enable RX VLAN offloading */
-       mtk_w32(eth, 0, MT7621_CDMP_EG_CTRL);
-
-       return 0;
-}
-
-static void mt7621_set_mac(struct mtk_mac *mac, unsigned char *hwaddr)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&mac->hw->page_lock, flags);
-       if (mac->id == 0) {
-               mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
-                       GSW_REG_GDMA1_MAC_ADRH);
-               mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
-                       (hwaddr[4] << 8) | hwaddr[5],
-                       GSW_REG_GDMA1_MAC_ADRL);
-       }
-       if (mac->id == 1) {
-               mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
-                       GSW_REG_GDMA2_MAC_ADRH);
-               mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
-                       (hwaddr[4] << 8) | hwaddr[5],
-                       GSW_REG_GDMA2_MAC_ADRL);
-       }
-       spin_unlock_irqrestore(&mac->hw->page_lock, flags);
-}
-
-static struct mtk_soc_data mt7621_data = {
-       .hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
-                      NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
-                      NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
-                      NETIF_F_IPV6_CSUM,
-       .dma_type = MTK_PDMA,
-       .dma_ring_size = 256,
-       .napi_weight = 64,
-       .new_stats = 1,
-       .padding_64b = 1,
-       .rx_2b_offset = 1,
-       .rx_sg_dma = 1,
-       .has_switch = 1,
-       .mac_count = 2,
-       .reset_fe = mt7621_mtk_reset,
-       .set_mac = mt7621_set_mac,
-       .fwd_config = mt7621_fwd_config,
-       .switch_init = mtk_gsw_init,
-       .reg_table = mt7621_reg_table,
-       .pdma_glo_cfg = MTK_PDMA_SIZE_16DWORDS,
-       .rx_int = RT5350_RX_DONE_INT,
-       .tx_int = RT5350_TX_DONE_INT,
-       .status_int = MT7621_MTK_GDM1_AF | MT7621_MTK_GDM2_AF,
-       .checksum_bit = MT7621_L4_VALID,
-       .has_carrier = mt7620_has_carrier,
-       .mdio_read = mt7620_mdio_read,
-       .mdio_write = mt7620_mdio_write,
-       .mdio_adjust_link = mt7620_mdio_link_adjust,
-};
-
-const struct of_device_id of_mtk_match[] = {
-       { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
-       {},
-};
-
-MODULE_DEVICE_TABLE(of, of_mtk_match);
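For reference, the register layout used by mt7621_set_mac() above: the first two octets of the MAC land in the low 16 bits of the ADRH register and the remaining four fill ADRL, most significant octet first. A worked example with a made-up address:

/* 00:11:22:33:44:55 packs as: */
unsigned char hwaddr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
u32 adrh = (hwaddr[0] << 8) | hwaddr[1];           /* 0x00000011 */
u32 adrl = (hwaddr[2] << 24) | (hwaddr[3] << 16) |
           (hwaddr[4] << 8) | hwaddr[5];           /* 0x22334455 */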
index d33533872a16f1c4e4e3d7207d6cc04cd6a63379..c8fa17cfa807a24f6450034c714ebc94b59507d4 100644 (file)
@@ -1,6 +1,7 @@
 config PCI_MT7621
        tristate "MediaTek MT7621 PCI Controller"
        depends on RALINK
+       depends on PCI
        select PCI_DRIVERS_GENERIC
        help
          This selects a driver for the MediaTek MT7621 PCI Controller.
index d6248eecf123bdc5ad123ac1fd7b5821b9a22e24..2aee64fdaec555abf8734aef11c7d269dca86150 100644 (file)
@@ -163,7 +163,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
                goto no_phy;
 
        phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
-                               PHY_INTERFACE_MODE_GMII);
+                               priv->phy_mode);
        of_node_put(phy_node);
 
        if (!phydev)
index ce61c5670ef645c78e080ab588b0d75c7591378c..986db76705ccc6b5d384f2db08f715739936d942 100644 (file)
@@ -653,14 +653,37 @@ static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
        return np;
 }
 
-static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port)
+static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface,
+                               int port)
 {
+       struct device_node *np = priv->of_node;
        u32 delay_value;
+       bool rx_delay;
+       bool tx_delay;
 
-       if (!of_property_read_u32(np, "rx-delay", &delay_value))
+       /* By default, both RX/TX delay is enabled in
+        * __cvmx_helper_rgmii_enable().
+        */
+       rx_delay = true;
+       tx_delay = true;
+
+       if (!of_property_read_u32(np, "rx-delay", &delay_value)) {
                cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
-       if (!of_property_read_u32(np, "tx-delay", &delay_value))
+               rx_delay = delay_value > 0;
+       }
+       if (!of_property_read_u32(np, "tx-delay", &delay_value)) {
                cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
+               tx_delay = delay_value > 0;
+       }
+
+       if (!rx_delay && !tx_delay)
+               priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
+       else if (!rx_delay)
+               priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
+       else if (!tx_delay)
+               priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID;
+       else
+               priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
 }
 
 static int cvm_oct_probe(struct platform_device *pdev)
@@ -825,6 +848,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
                        priv->port = port;
                        priv->queue = cvmx_pko_get_base_queue(priv->port);
                        priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
+                       priv->phy_mode = PHY_INTERFACE_MODE_NA;
                        for (qos = 0; qos < 16; qos++)
                                skb_queue_head_init(&priv->tx_free_list[qos]);
                        for (qos = 0; qos < cvmx_pko_get_num_queues(port);
@@ -856,6 +880,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
                                break;
 
                        case CVMX_HELPER_INTERFACE_MODE_SGMII:
+                               priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
                                dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
                                strcpy(dev->name, "eth%d");
                                break;
@@ -865,11 +890,16 @@ static int cvm_oct_probe(struct platform_device *pdev)
                                strcpy(dev->name, "spi%d");
                                break;
 
-                       case CVMX_HELPER_INTERFACE_MODE_RGMII:
                        case CVMX_HELPER_INTERFACE_MODE_GMII:
+                               priv->phy_mode = PHY_INTERFACE_MODE_GMII;
+                               dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
+                               strcpy(dev->name, "eth%d");
+                               break;
+
+                       case CVMX_HELPER_INTERFACE_MODE_RGMII:
                                dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
                                strcpy(dev->name, "eth%d");
-                               cvm_set_rgmii_delay(priv->of_node, interface,
+                               cvm_set_rgmii_delay(priv, interface,
                                                    port_index);
                                break;
                        }
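The hunks above encode a small decision table: a devicetree delay property written as zero means the MAC no longer applies that delay, so the PHY must add it internally. Condensed into one sketch (hypothetical helper name, assuming the phy_interface_t constants from <linux/phy.h>):

static phy_interface_t rgmii_mode(bool rx_delay, bool tx_delay)
{
        /* rx_delay/tx_delay == true means the MAC still applies that delay */
        if (!rx_delay && !tx_delay)
                return PHY_INTERFACE_MODE_RGMII_ID;   /* PHY adds RX and TX */
        if (!rx_delay)
                return PHY_INTERFACE_MODE_RGMII_RXID; /* PHY adds RX only */
        if (!tx_delay)
                return PHY_INTERFACE_MODE_RGMII_TXID; /* PHY adds TX only */
        return PHY_INTERFACE_MODE_RGMII;              /* MAC adds both */
}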
index 4a07e7f43d128cb6903891ad6fcbd399b61e0e61..be570d33685add6873f62740f7c333b8920096aa 100644 (file)
@@ -12,7 +12,7 @@
 #define OCTEON_ETHERNET_H
 
 #include <linux/of.h>
-
+#include <linux/phy.h>
 #include <asm/octeon/cvmx-helper-board.h>
 
 /**
@@ -33,6 +33,8 @@ struct octeon_ethernet {
         * cvmx_helper_interface_mode_t
         */
        int imode;
+       /* PHY mode */
+       phy_interface_t phy_mode;
        /* List of outstanding tx buffers per queue */
        struct sk_buff_head tx_free_list[16];
        unsigned int last_speed;
index 80b8d4153414a80d555d1dfe87fb48e77e684e10..a54286498a477fd2b935683f695d6e57ba8382d5 100644 (file)
@@ -45,7 +45,7 @@ static int dcon_init_xo_1(struct dcon_priv *dcon)
 {
        unsigned char lob;
        int ret, i;
-       struct dcon_gpio *pin = &gpios_asis[0];
+       const struct dcon_gpio *pin = &gpios_asis[0];
 
        for (i = 0; i < ARRAY_SIZE(gpios_asis); i++) {
                gpios[i] = devm_gpiod_get(&dcon->client->dev, pin[i].name,
index 1723a47a96b4092fb16df938f9ec1699c220f0f0..952f2ab5134783db8e8978fd79282045a9f65db6 100644 (file)
@@ -174,7 +174,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
 
        pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
 
-       rtw_alloc_hwxmits(padapter);
+       res = rtw_alloc_hwxmits(padapter);
+       if (res == _FAIL)
+               goto exit;
        rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
 
        for (i = 0; i < 4; i++)
@@ -1503,7 +1505,7 @@ exit:
        return res;
 }
 
-void rtw_alloc_hwxmits(struct adapter *padapter)
+s32 rtw_alloc_hwxmits(struct adapter *padapter)
 {
        struct hw_xmit *hwxmits;
        struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -1512,6 +1514,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
 
        pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry,
                                     sizeof(struct hw_xmit), GFP_KERNEL);
+       if (!pxmitpriv->hwxmits)
+               return _FAIL;
 
        hwxmits = pxmitpriv->hwxmits;
 
@@ -1519,6 +1523,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
        hwxmits[1] .sta_queue = &pxmitpriv->vi_pending;
        hwxmits[2] .sta_queue = &pxmitpriv->be_pending;
        hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
+       return _SUCCESS;
 }
 
 void rtw_free_hwxmits(struct adapter *padapter)
index 788f59c74ea1e45fb7e598a8f83361b586647969..ba7e15fbde72d60ab9f5589756f024642359a82f 100644 (file)
@@ -336,7 +336,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
 void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry);
 s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
 void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
-void rtw_alloc_hwxmits(struct adapter *padapter);
+s32 rtw_alloc_hwxmits(struct adapter *padapter);
 void rtw_free_hwxmits(struct adapter *padapter);
 s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
 
index 1920d02f7c9f3724cb1516f857875f1bb8a76cfd..8c36acedf50769312ab170c983c192824fcad66a 100644 (file)
@@ -147,17 +147,9 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
 
 static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
 {
-       u32 val;
-       void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj      *pcmd);
        struct cmd_obj *pcmd  = (struct cmd_obj *)pbuf;
 
-       if (pcmd->rsp && pcmd->rspsz > 0)
-               memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
-       pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
-       if (!pcmd_callback)
-               r8712_free_cmd_obj(pcmd);
-       else
-               pcmd_callback(padapter, pcmd);
+       r8712_free_cmd_obj(pcmd);
        return H2C_SUCCESS;
 }
 
index 92fb77666d4462d411d927d26a7bd58d1503c8ca..1ef86b8c592f1490c41bb5436fb45d4e8e065ed5 100644 (file)
@@ -140,7 +140,7 @@ enum rtl8712_h2c_cmd {
 static struct _cmd_callback    cmd_callback[] = {
        {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
        {GEN_CMD_CODE(_Write_MACREG), NULL},
-       {GEN_CMD_CODE(_Read_BBREG), &r8712_getbbrfreg_cmdrsp_callback},
+       {GEN_CMD_CODE(_Read_BBREG), NULL},
        {GEN_CMD_CODE(_Write_BBREG), NULL},
        {GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback},
        {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
index 094d61bcb46983226a2cbf8974c5dd2e78758e05..b87f13a0b5639acbbb9d88265a6e810ddfb80924 100644 (file)
@@ -260,7 +260,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
                }
        }
 
-       rtw_alloc_hwxmits(padapter);
+       res = rtw_alloc_hwxmits(padapter);
+       if (res == _FAIL)
+               goto exit;
        rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
 
        for (i = 0; i < 4; i++) {
@@ -2144,7 +2146,7 @@ exit:
        return res;
 }
 
-void rtw_alloc_hwxmits(struct adapter *padapter)
+s32 rtw_alloc_hwxmits(struct adapter *padapter)
 {
        struct hw_xmit *hwxmits;
        struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -2155,10 +2157,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
 
        pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
 
-       if (pxmitpriv->hwxmits == NULL) {
-               DBG_871X("alloc hwxmits fail!...\n");
-               return;
-       }
+       if (!pxmitpriv->hwxmits)
+               return _FAIL;
 
        hwxmits = pxmitpriv->hwxmits;
 
@@ -2204,7 +2204,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
 
        }
 
-
+       return _SUCCESS;
 }
 
 void rtw_free_hwxmits(struct adapter *padapter)
index 1b38b9182b3165bdfacf97fcc2fa43c34b276468..37f42b2f22f1dcf173b2deeadc904df79f216036 100644 (file)
@@ -487,7 +487,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
 void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv);
 
 
-void rtw_alloc_hwxmits(struct adapter *padapter);
+s32 rtw_alloc_hwxmits(struct adapter *padapter);
 void rtw_free_hwxmits(struct adapter *padapter);
 
 
index 9930ed954abb2d8aa437a1784fbd3e3f83a043ca..4cc77b2016e1e45f14834820b429f7e8cda19b42 100644 (file)
@@ -180,6 +180,8 @@ static int rtl_phydm_init_priv(struct rtl_priv *rtlpriv,
 
        rtlpriv->phydm.internal =
                kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL);
+       if (!rtlpriv->phydm.internal)
+               return 0;
 
        _rtl_phydm_init_com_info(rtlpriv, ic, params);
 
index f061dd1382aa102e53ac532844c8e2c9959ae568..cf6b7a80b753b35dc2d488e589f97f9d1899fedb 100644 (file)
@@ -743,6 +743,8 @@ void rtl8822be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
                      u1_rsvd_page_loc, 3);
 
        skb = dev_alloc_skb(totalpacketlen);
+       if (!skb)
+               return;
        memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet,
               totalpacketlen);
 
index edff6ce8565558f0671a4fb13119488a6dd254c4..9d85a3a1af4c5eadef475ffd20e39e66612955dc 100644 (file)
@@ -210,12 +210,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
                return -EINVAL;
 
        spin_lock_irqsave(&speakup_info.spinlock, flags);
+       synth_soft.alive = 1;
        while (1) {
                prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
-               if (!unicode)
-                       synth_buffer_skip_nonlatin1();
-               if (!synth_buffer_empty() || speakup_info.flushing)
-                       break;
+               if (synth_current() == &synth_soft) {
+                       if (!unicode)
+                               synth_buffer_skip_nonlatin1();
+                       if (!synth_buffer_empty() || speakup_info.flushing)
+                               break;
+               }
                spin_unlock_irqrestore(&speakup_info.spinlock, flags);
                if (fp->f_flags & O_NONBLOCK) {
                        finish_wait(&speakup_event, &wait);
@@ -235,6 +238,8 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
 
        /* Keep 3 bytes available for a 16bit UTF-8-encoded character */
        while (chars_sent <= count - bytes_per_ch) {
+               if (synth_current() != &synth_soft)
+                       break;
                if (speakup_info.flushing) {
                        speakup_info.flushing = 0;
                        ch = '\x18';
@@ -331,7 +336,8 @@ static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait)
        poll_wait(fp, &speakup_event, wait);
 
        spin_lock_irqsave(&speakup_info.spinlock, flags);
-       if (!synth_buffer_empty() || speakup_info.flushing)
+       if (synth_current() == &synth_soft &&
+           (!synth_buffer_empty() || speakup_info.flushing))
                ret = EPOLLIN | EPOLLRDNORM;
        spin_unlock_irqrestore(&speakup_info.spinlock, flags);
        return ret;
index c8e688878fc705a47d88cfa1f4f73e2dceaefdc2..ac6a74883af4753d33906e62c366b3d4f8a1c677 100644 (file)
@@ -74,6 +74,7 @@ int synth_request_region(unsigned long start, unsigned long n);
 int synth_release_region(unsigned long start, unsigned long n);
 int synth_add(struct spk_synth *in_synth);
 void synth_remove(struct spk_synth *in_synth);
+struct spk_synth *synth_current(void);
 
 extern struct speakup_info_t speakup_info;
 
index 25f259ee4ffc74990e5a19c8560840ca9a59058e..3568bfb89912c3316d649b6c19223f4206936457 100644 (file)
@@ -481,4 +481,10 @@ void synth_remove(struct spk_synth *in_synth)
 }
 EXPORT_SYMBOL_GPL(synth_remove);
 
+struct spk_synth *synth_current(void)
+{
+       return synth;
+}
+EXPORT_SYMBOL_GPL(synth_current);
+
 short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM };
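
The speakup hunks work together: the synth core exports the currently active synthesizer via synth_current(), and softsynthx_read()/softsynth_poll() now touch the shared buffer only while the soft synth is that active backend. The guard condition, pulled out into a hypothetical helper as a sketch:

    /* drain the shared buffer only while this backend owns it */
    static bool softsynth_may_read(void)
    {
            return synth_current() == &synth_soft &&
                   (!synth_buffer_empty() || speakup_info.flushing);
    }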
index 804daf83be35172ecda66b35a9bbfd7e6e4fc5c2..064d0db4c51ef14af59f908a32df3c94fa768714 100644 (file)
@@ -3513,6 +3513,7 @@ static int vchiq_probe(struct platform_device *pdev)
        struct device_node *fw_node;
        const struct of_device_id *of_id;
        struct vchiq_drvdata *drvdata;
+       struct device *vchiq_dev;
        int err;
 
        of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
@@ -3547,9 +3548,12 @@ static int vchiq_probe(struct platform_device *pdev)
                goto failed_platform_init;
        }
 
-       if (IS_ERR(device_create(vchiq_class, &pdev->dev, vchiq_devid,
-                                NULL, "vchiq")))
+       vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL,
+                                 "vchiq");
+       if (IS_ERR(vchiq_dev)) {
+               err = PTR_ERR(vchiq_dev);
                goto failed_device_create;
+       }
 
        vchiq_debugfs_init();
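
device_create() reports failure through an ERR_PTR-encoded pointer, so the vchiq probe now keeps the returned pointer and propagates PTR_ERR() instead of discarding the reason. The idiom, as a minimal sketch (class/parent/devt are placeholders):

    struct device *dev = device_create(class, parent, devt, NULL, "example");

    if (IS_ERR(dev))
            return PTR_ERR(dev);    /* decode the errno carried in the pointer */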
 
index b370985b58a101f65e561c4cdbc43d51a26d1e19..c6bb4aaf9bd02fc18b6ca9e1bbc7a37e5724c805 100644 (file)
@@ -1033,8 +1033,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
                return;
        }
 
-       MACvIntDisable(priv->PortOffset);
-
        spin_lock_irqsave(&priv->lock, flags);
 
        /* Read low level stats */
@@ -1122,8 +1120,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
        }
 
        spin_unlock_irqrestore(&priv->lock, flags);
-
-       MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
 }
 
 static void vnt_interrupt_work(struct work_struct *work)
@@ -1133,14 +1129,17 @@ static void vnt_interrupt_work(struct work_struct *work)
 
        if (priv->vif)
                vnt_interrupt_process(priv);
+
+       MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
 }
 
 static irqreturn_t vnt_interrupt(int irq,  void *arg)
 {
        struct vnt_private *priv = arg;
 
-       if (priv->vif)
-               schedule_work(&priv->interrupt_work);
+       schedule_work(&priv->interrupt_work);
+
+       MACvIntDisable(priv->PortOffset);
 
        return IRQ_HANDLED;
 }
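
The vt6655 rework moves interrupt masking into the hard handler and unmasking to the end of the work item, so the device stays quiet for exactly the window in which its status bits are being drained. The shape of that pattern, with hypothetical dev_mask_irqs()/dev_unmask_irqs() standing in for MACvIntDisable()/MACvIntEnable():

    static irqreturn_t example_hardirq(int irq, void *arg)
    {
            struct example_priv *priv = arg;

            schedule_work(&priv->interrupt_work);  /* defer to process context */
            dev_mask_irqs(priv);                   /* silence the line meanwhile */
            return IRQ_HANDLED;
    }

    static void example_work(struct work_struct *work)
    {
            struct example_priv *priv =
                    container_of(work, struct example_priv, interrupt_work);

            if (priv->vif)
                    example_process_events(priv);
            dev_unmask_irqs(priv);                 /* re-arm only when drained */
    }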
index 720760cd493feb4583bffde64edd5253dd35e380..ba39647a690c3e37ed06b1ff3a9395302277fa76 100644 (file)
@@ -119,8 +119,7 @@ static const struct debugfs_reg32 bcm2835_thermal_regs[] = {
 
 static void bcm2835_thermal_debugfs(struct platform_device *pdev)
 {
-       struct thermal_zone_device *tz = platform_get_drvdata(pdev);
-       struct bcm2835_thermal_data *data = tz->devdata;
+       struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
        struct debugfs_regset32 *regset;
 
        data->debugfsdir = debugfs_create_dir("bcm2835_thermal", NULL);
@@ -266,7 +265,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
 
        data->tz = tz;
 
-       platform_set_drvdata(pdev, tz);
+       platform_set_drvdata(pdev, data);
 
        /*
         * Thermal_zone doesn't enable hwmon as default,
@@ -290,8 +289,8 @@ err_clk:
 
 static int bcm2835_thermal_remove(struct platform_device *pdev)
 {
-       struct thermal_zone_device *tz = platform_get_drvdata(pdev);
-       struct bcm2835_thermal_data *data = tz->devdata;
+       struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
+       struct thermal_zone_device *tz = data->tz;
 
        debugfs_remove_recursive(data->debugfsdir);
        thermal_zone_of_sensor_unregister(&pdev->dev, tz);
index 6fff16113628743ae9a6b006799fb376abb39198..f7c1f49ec87f2a397d882ca595421e26d71df2b3 100644 (file)
@@ -536,12 +536,11 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
                               struct thermal_zone_device *tz, u32 power,
                               unsigned long *state)
 {
-       unsigned int cur_freq, target_freq;
+       unsigned int target_freq;
        u32 last_load, normalised_power;
        struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
        struct cpufreq_policy *policy = cpufreq_cdev->policy;
 
-       cur_freq = cpufreq_quick_get(policy->cpu);
        power = power > 0 ? power : 0;
        last_load = cpufreq_cdev->last_load ?: 1;
        normalised_power = (power * 100) / last_load;
index 61ca7ce3624ed4298c6b0b99ee2d5e63f0f534cc..5f3ed24e26ec78fbf3bd3a93b1f91d1845d67f2a 100644 (file)
@@ -22,6 +22,13 @@ enum int3400_thermal_uuid {
        INT3400_THERMAL_PASSIVE_1,
        INT3400_THERMAL_ACTIVE,
        INT3400_THERMAL_CRITICAL,
+       INT3400_THERMAL_ADAPTIVE_PERFORMANCE,
+       INT3400_THERMAL_EMERGENCY_CALL_MODE,
+       INT3400_THERMAL_PASSIVE_2,
+       INT3400_THERMAL_POWER_BOSS,
+       INT3400_THERMAL_VIRTUAL_SENSOR,
+       INT3400_THERMAL_COOLING_MODE,
+       INT3400_THERMAL_HARDWARE_DUTY_CYCLING,
        INT3400_THERMAL_MAXIMUM_UUID,
 };
 
@@ -29,6 +36,13 @@ static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
        "42A441D6-AE6A-462b-A84B-4A8CE79027D3",
        "3A95C389-E4B8-4629-A526-C52C88626BAE",
        "97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
+       "63BE270F-1C11-48FD-A6F7-3AF253FF3E2D",
+       "5349962F-71E6-431D-9AE8-0A635B710AEE",
+       "9E04115A-AE87-4D1C-9500-0F3E340BFE75",
+       "F5A35014-C209-46A4-993A-EB56DE7530A1",
+       "6ED722A7-9240-48A5-B479-31EEF723D7CF",
+       "16CAF1B7-DD38-40ED-B1C1-1B8A1913D531",
+       "BE84BABF-C4D4-403D-B495-3128FD44dAC1",
 };
 
 struct int3400_thermal_priv {
@@ -299,10 +313,9 @@ static int int3400_thermal_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, priv);
 
-       if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
-               int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
-               int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
-       }
+       int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
+       int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
+
        priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
                                                priv, &int3400_thermal_ops,
                                                &int3400_thermal_params, 0, 0);
index 7571f7c2e7c9abd488cf9ee3a6cdb223257f80ab..ac7256b5f020519608a0ec3bd4a2cb8fd652e489 100644 (file)
@@ -101,7 +101,7 @@ struct powerclamp_worker_data {
        bool clamping;
 };
 
-static struct powerclamp_worker_data * __percpu worker_data;
+static struct powerclamp_worker_data __percpu *worker_data;
 static struct thermal_cooling_device *cooling_dev;
 static unsigned long *cpu_clamping_mask;  /* bit map for tracking per cpu
                                           * clamping kthread worker
@@ -494,7 +494,7 @@ static void start_power_clamp_worker(unsigned long cpu)
        struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
        struct kthread_worker *worker;
 
-       worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inject/%ld", cpu);
+       worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu);
        if (IS_ERR(worker))
                return;
 
index 5c07a61447d3fc1bb469ab531a876a27fae44739..e4ea7f6aef20e335e160ad11605bc4f318e17739 100644 (file)
@@ -199,6 +199,9 @@ enum {
 #define MT7622_TS1     0
 #define MT7622_NUM_CONTROLLER          1
 
+/* The maximum number of banks */
+#define MAX_NUM_ZONES          8
+
 /* The calibration coefficient of sensor  */
 #define MT7622_CALIBRATION     165
 
@@ -249,7 +252,7 @@ struct mtk_thermal_data {
        const int num_controller;
        const int *controller_offset;
        bool need_switch_bank;
-       struct thermal_bank_cfg bank_data[];
+       struct thermal_bank_cfg bank_data[MAX_NUM_ZONES];
 };
 
 struct mtk_thermal {
@@ -268,7 +271,7 @@ struct mtk_thermal {
        s32 vts[MAX_NUM_VTS];
 
        const struct mtk_thermal_data *conf;
-       struct mtk_thermal_bank banks[];
+       struct mtk_thermal_bank banks[MAX_NUM_ZONES];
 };
 
 /* MT8183 thermal sensor data */
index 48eef552cba48edb5d60f7a75b715f9536e1b5ae..fc9399d9c0820d59a4321520fd5dcf9242620867 100644 (file)
@@ -666,7 +666,7 @@ static int exynos_get_temp(void *p, int *temp)
        struct exynos_tmu_data *data = p;
        int value, ret = 0;
 
-       if (!data || !data->tmu_read || !data->enabled)
+       if (!data || !data->tmu_read)
                return -EINVAL;
        else if (!data->enabled)
                /*
index b121d8f8f3d7d1a9d1dfc4341d61d51a5253a3f1..27aeca30eeae16845644a6edaf2f53d798ca2609 100644 (file)
@@ -266,7 +266,7 @@ MODULE_PARM_DESC(pc104_3, "set interface types for ISA(PC104) board #3 (e.g. pc1
 module_param_array(pc104_4, ulong, NULL, 0);
 MODULE_PARM_DESC(pc104_4, "set interface types for ISA(PC104) board #4 (e.g. pc104_4=232,232,485,485,...");
 
-static int rp_init(void);
+static int __init rp_init(void);
 static void rp_cleanup_module(void);
 
 module_init(rp_init);
index db5df3d548188b6c440db8928ac157b5512174bb..3bdd56a1021b26d6e74ff98ad6f0e25f25f5de08 100644 (file)
@@ -49,11 +49,6 @@ struct ar933x_uart_port {
        struct clk              *clk;
 };
 
-static inline bool ar933x_uart_console_enabled(void)
-{
-       return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE);
-}
-
 static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
                                            int offset)
 {
@@ -508,6 +503,7 @@ static const struct uart_ops ar933x_uart_ops = {
        .verify_port    = ar933x_uart_verify_port,
 };
 
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
 static struct ar933x_uart_port *
 ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS];
 
@@ -604,14 +600,7 @@ static struct console ar933x_uart_console = {
        .index          = -1,
        .data           = &ar933x_uart_driver,
 };
-
-static void ar933x_uart_add_console_port(struct ar933x_uart_port *up)
-{
-       if (!ar933x_uart_console_enabled())
-               return;
-
-       ar933x_console_ports[up->port.line] = up;
-}
+#endif /* CONFIG_SERIAL_AR933X_CONSOLE */
 
 static struct uart_driver ar933x_uart_driver = {
        .owner          = THIS_MODULE,
@@ -700,7 +689,9 @@ static int ar933x_uart_probe(struct platform_device *pdev)
        baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP);
        up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD);
 
-       ar933x_uart_add_console_port(up);
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+       ar933x_console_ports[up->port.line] = up;
+#endif
 
        ret = uart_add_one_port(&ar933x_uart_driver, &up->port);
        if (ret)
@@ -749,8 +740,9 @@ static int __init ar933x_uart_init(void)
 {
        int ret;
 
-       if (ar933x_uart_console_enabled())
-               ar933x_uart_driver.cons = &ar933x_uart_console;
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+       ar933x_uart_driver.cons = &ar933x_uart_console;
+#endif
 
        ret = uart_register_driver(&ar933x_uart_driver);
        if (ret)
index 05147fe243434a52e4ca827227d95dc1f6a0f2e6..0b4f3690532145da4228b8b8255f0d5da928a31f 100644 (file)
@@ -166,6 +166,8 @@ struct atmel_uart_port {
        unsigned int            pending_status;
        spinlock_t              lock_suspended;
 
+       bool                    hd_start_rx;    /* can start RX during half-duplex operation */
+
        /* ISO7816 */
        unsigned int            fidi_min;
        unsigned int            fidi_max;
@@ -231,6 +233,13 @@ static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
        __raw_writeb(value, port->membase + ATMEL_US_THR);
 }
 
+static inline int atmel_uart_is_half_duplex(struct uart_port *port)
+{
+       return ((port->rs485.flags & SER_RS485_ENABLED) &&
+               !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
+               (port->iso7816.flags & SER_ISO7816_ENABLED);
+}
+
 #ifdef CONFIG_SERIAL_ATMEL_PDC
 static bool atmel_use_pdc_rx(struct uart_port *port)
 {
@@ -608,10 +617,9 @@ static void atmel_stop_tx(struct uart_port *port)
        /* Disable interrupts */
        atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
 
-       if (((port->rs485.flags & SER_RS485_ENABLED) &&
-            !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
-           port->iso7816.flags & SER_ISO7816_ENABLED)
+       if (atmel_uart_is_half_duplex(port))
                atmel_start_rx(port);
+
 }
 
 /*
@@ -628,9 +636,7 @@ static void atmel_start_tx(struct uart_port *port)
                return;
 
        if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
-               if (((port->rs485.flags & SER_RS485_ENABLED) &&
-                    !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
-                   port->iso7816.flags & SER_ISO7816_ENABLED)
+               if (atmel_uart_is_half_duplex(port))
                        atmel_stop_rx(port);
 
        if (atmel_use_pdc_tx(port))
@@ -928,11 +934,14 @@ static void atmel_complete_tx_dma(void *arg)
         */
        if (!uart_circ_empty(xmit))
                atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
-       else if (((port->rs485.flags & SER_RS485_ENABLED) &&
-                 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
-                port->iso7816.flags & SER_ISO7816_ENABLED) {
-               /* DMA done, stop TX, start RX for RS485 */
-               atmel_start_rx(port);
+       else if (atmel_uart_is_half_duplex(port)) {
+               /*
+                * DMA done, re-enable TXEMPTY and signal that we can stop
+                * TX and start RX for RS485
+                */
+               atmel_port->hd_start_rx = true;
+               atmel_uart_writel(port, ATMEL_US_IER,
+                                 atmel_port->tx_done_mask);
        }
 
        spin_unlock_irqrestore(&port->lock, flags);
@@ -1288,6 +1297,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
                                         sg_dma_len(&atmel_port->sg_rx)/2,
                                         DMA_DEV_TO_MEM,
                                         DMA_PREP_INTERRUPT);
+       if (!desc) {
+               dev_err(port->dev, "Preparing DMA cyclic failed\n");
+               goto chan_err;
+       }
        desc->callback = atmel_complete_rx_dma;
        desc->callback_param = port;
        atmel_port->desc_rx = desc;
@@ -1376,9 +1389,20 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 
        if (pending & atmel_port->tx_done_mask) {
-               /* Either PDC or interrupt transmission */
                atmel_uart_writel(port, ATMEL_US_IDR,
                                  atmel_port->tx_done_mask);
+
+               /* Start RX if flag was set and FIFO is empty */
+               if (atmel_port->hd_start_rx) {
+                       if (!(atmel_uart_readl(port, ATMEL_US_CSR)
+                                       & ATMEL_US_TXEMPTY))
+                               dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
+
+                       atmel_port->hd_start_rx = false;
+                       atmel_start_rx(port);
+                       return;
+               }
+
                atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
        }
 }
@@ -1508,9 +1532,7 @@ static void atmel_tx_pdc(struct uart_port *port)
                atmel_uart_writel(port, ATMEL_US_IER,
                                  atmel_port->tx_done_mask);
        } else {
-               if (((port->rs485.flags & SER_RS485_ENABLED) &&
-                    !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
-                   port->iso7816.flags & SER_ISO7816_ENABLED) {
+               if (atmel_uart_is_half_duplex(port)) {
                        /* DMA done, stop TX, start RX for RS485 */
                        atmel_start_rx(port);
                }
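
Besides folding the thrice-repeated RS485/ISO7816 test into atmel_uart_is_half_duplex(), the atmel_serial hunks defer the TX-to-RX turnaround: DMA completion no longer starts RX directly but sets hd_start_rx and re-enables the TXEMPTY interrupt, and atmel_handle_transmit() performs the switch once the transmitter has actually drained. A minimal sketch of that hand-over (the helpers are placeholders, not the real ATMEL_US_* accessors):

    static void example_tx_dma_complete(struct example_port *port)
    {
            if (example_is_half_duplex(port)) {
                    port->hd_start_rx = true;       /* remember to turn around */
                    example_enable_txempty_irq(port);
            }
    }

    static void example_txempty_irq(struct example_port *port)
    {
            if (port->hd_start_rx) {
                    port->hd_start_rx = false;
                    example_start_rx(port);         /* safe: shifter is empty */
            }
    }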
index 6fb312e7af713ecd3efcc4c0ef069602635f7681..bfe5e9e034ecf86b3de80476eb90a44a0d228359 100644 (file)
@@ -148,8 +148,10 @@ static int configure_kgdboc(void)
        char *cptr = config;
        struct console *cons;
 
-       if (!strlen(config) || isspace(config[0]))
+       if (!strlen(config) || isspace(config[0])) {
+               err = 0;
                goto noconfig;
+       }
 
        kgdboc_io_ops.is_console = 0;
        kgdb_tty_driver = NULL;
index f5bdde40562750c7695823e93bab799bf5593ad5..450ba6d7996c229e7e3a796439f8e6caa1dbaebb 100644 (file)
@@ -1415,6 +1415,8 @@ static int max310x_spi_probe(struct spi_device *spi)
        if (spi->dev.of_node) {
                const struct of_device_id *of_id =
                        of_match_device(max310x_dt_ids, &spi->dev);
+               if (!of_id)
+                       return -ENODEV;
 
                devtype = (struct max310x_devtype *)of_id->data;
        } else {
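
of_match_device() can return NULL even when an of_node is present, so dereferencing of_id->data unconditionally can oops; this max310x hunk and the mvebu-uart hunk below add the same guard. The defensive pattern, sketched with hypothetical names:

    static int example_probe(struct spi_device *spi)
    {
            const struct of_device_id *of_id =
                    of_match_device(example_dt_ids, &spi->dev);

            if (!of_id)
                    return -ENODEV;         /* no usable match data */

            return example_setup(spi, of_id->data);
    }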
index 231f751d1ef48b42e4a9820c73408242862035e9..7e7b1559fa3695406ae80edda49f0f1f7634dc9a 100644 (file)
@@ -810,6 +810,9 @@ static int mvebu_uart_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
+       if (!match)
+               return -ENODEV;
+
        /* Assume that all UART ports have a DT alias or none has */
        id = of_alias_get_id(pdev->dev.of_node, "serial");
        if (!pdev->dev.of_node || id < 0)
index 27235a526cce8c4b59aa14f6764e466b10988748..4c188f4079b3ea68ee51982b41d4b14ba27567c4 100644 (file)
@@ -1686,6 +1686,10 @@ static int mxs_auart_probe(struct platform_device *pdev)
 
        s->port.mapbase = r->start;
        s->port.membase = ioremap(r->start, resource_size(r));
+       if (!s->port.membase) {
+               ret = -ENOMEM;
+               goto out_disable_clks;
+       }
        s->port.ops = &mxs_auart_ops;
        s->port.iotype = UPIO_MEM;
        s->port.fifosize = MXS_AUART_FIFO_SIZE;
index 3bcec1c20219102b277aafc72b548425df33e175..35e5f9c5d5bed48274363343366c8bd76395c500 100644 (file)
@@ -1050,7 +1050,7 @@ static int __init qcom_geni_console_setup(struct console *co, char *options)
 {
        struct uart_port *uport;
        struct qcom_geni_serial_port *port;
-       int baud;
+       int baud = 9600;
        int bits = 8;
        int parity = 'n';
        int flow = 'n';
index 635178cf3eed538aa35bf49225a4886097e2e7b0..a31db15cd7c0d36bf2e4dee32d7b1201bc2674c5 100644 (file)
@@ -1507,7 +1507,7 @@ static int __init sc16is7xx_init(void)
        ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver);
        if (ret < 0) {
                pr_err("failed to init sc16is7xx i2c --> %d\n", ret);
-               return ret;
+               goto err_i2c;
        }
 #endif
 
@@ -1515,10 +1515,20 @@ static int __init sc16is7xx_init(void)
        ret = spi_register_driver(&sc16is7xx_spi_uart_driver);
        if (ret < 0) {
                pr_err("failed to init sc16is7xx spi --> %d\n", ret);
-               return ret;
+               goto err_spi;
        }
 #endif
        return ret;
+
+#ifdef CONFIG_SERIAL_SC16IS7XX_SPI
+err_spi:
+#endif
+#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
+       i2c_del_driver(&sc16is7xx_i2c_uart_driver);
+err_i2c:
+#endif
+       uart_unregister_driver(&sc16is7xx_uart);
+       return ret;
 }
 module_init(sc16is7xx_init);
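
Without the unwind ladder, a failed SPI registration returned while leaving the I2C driver and the UART core driver registered. The fixed sc16is7xx_init() keeps each err_* label compiled in only alongside the step it unwinds and lets the error paths fall through; reduced to its shape (EXAMPLE_I2C/EXAMPLE_SPI standing in for the real Kconfig symbols):

    static int __init example_init(void)
    {
            int ret;

            ret = example_register_core();
            if (ret)
                    return ret;

    #ifdef EXAMPLE_I2C
            ret = example_register_i2c();
            if (ret)
                    goto err_i2c;
    #endif
    #ifdef EXAMPLE_SPI
            ret = example_register_spi();
            if (ret)
                    goto err_spi;
    #endif
            return 0;

    #ifdef EXAMPLE_SPI
    err_spi:
    #endif
    #ifdef EXAMPLE_I2C
            example_unregister_i2c();
    err_i2c:
    #endif
            example_unregister_core();
            return ret;
    }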
 
index 060fcd42b6d560105a114c9923ce1cdcc177b696..3cd139752d3f70f9dfce1fe2c43f3eab03cf433a 100644 (file)
@@ -838,19 +838,9 @@ static void sci_transmit_chars(struct uart_port *port)
 
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
-       if (uart_circ_empty(xmit)) {
+       if (uart_circ_empty(xmit))
                sci_stop_tx(port);
-       } else {
-               ctrl = serial_port_in(port, SCSCR);
-
-               if (port->type != PORT_SCI) {
-                       serial_port_in(port, SCxSR); /* Dummy read */
-                       sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
-               }
 
-               ctrl |= SCSCR_TIE;
-               serial_port_out(port, SCSCR, ctrl);
-       }
 }
 
 /* On SH3, SCIF may read end-of-break as a space->mark char */
@@ -2522,14 +2512,16 @@ done:
                         * center of the last stop bit in sampling clocks.
                         */
                        int last_stop = bits * 2 - 1;
-                       int deviation = min_err * srr * last_stop / 2 / baud;
+                       int deviation = DIV_ROUND_CLOSEST(min_err * last_stop *
+                                                         (int)(srr + 1),
+                                                         2 * (int)baud);
 
                        if (abs(deviation) >= 2) {
                                /* At least two sampling clocks off at the
                                 * last stop bit; we can increase the error
                                 * margin by shifting the sampling point.
                                 */
-                               int shift = min(-8, max(7, deviation / 2));
+                               int shift = clamp(deviation / 2, -8, 7);
 
                                hssrr |= (shift << HSCIF_SRHP_SHIFT) &
                                         HSCIF_SRHP_MASK;
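
Two genuine bugs die in this hunk. The deviation of the last stop bit is now computed in sampling clocks with rounding, using the sampling rate srr + 1 rather than the raw register value srr. And min(-8, max(7, deviation / 2)) was inverted: max(7, x) is at least 7, so min(-8, ...) always evaluated to -8 regardless of deviation, while clamp(deviation / 2, -8, 7) bounds it as intended. A worked example under assumed values:

    /* say min_err = 200, bits = 10 (so last_stop = 19), srr = 15, baud = 115200 */
    int deviation = DIV_ROUND_CLOSEST(200 * 19 * (15 + 1), 2 * 115200);
                                    /* 60800 / 230400 -> rounds to 0 */
    int shift = clamp(deviation / 2, -8, 7);    /* fits the HSCIF_SRHP field */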
index 044c3cbdcfa40664497d13bd00e607584eff99c7..a9e12b3bc31d7e19966c724b8b31ce3ac64c5242 100644 (file)
@@ -325,7 +325,7 @@ static void tty_port_shutdown(struct tty_port *port, struct tty_struct *tty)
                if (tty && C_HUPCL(tty))
                        tty_port_lower_dtr_rts(port);
 
-               if (port->ops->shutdown)
+               if (port->ops && port->ops->shutdown)
                        port->ops->shutdown(port);
        }
 out:
@@ -398,7 +398,7 @@ EXPORT_SYMBOL_GPL(tty_port_tty_wakeup);
  */
 int tty_port_carrier_raised(struct tty_port *port)
 {
-       if (port->ops->carrier_raised == NULL)
+       if (!port->ops || !port->ops->carrier_raised)
                return 1;
        return port->ops->carrier_raised(port);
 }
@@ -414,7 +414,7 @@ EXPORT_SYMBOL(tty_port_carrier_raised);
  */
 void tty_port_raise_dtr_rts(struct tty_port *port)
 {
-       if (port->ops->dtr_rts)
+       if (port->ops && port->ops->dtr_rts)
                port->ops->dtr_rts(port, 1);
 }
 EXPORT_SYMBOL(tty_port_raise_dtr_rts);
@@ -429,7 +429,7 @@ EXPORT_SYMBOL(tty_port_raise_dtr_rts);
  */
 void tty_port_lower_dtr_rts(struct tty_port *port)
 {
-       if (port->ops->dtr_rts)
+       if (port->ops && port->ops->dtr_rts)
                port->ops->dtr_rts(port, 0);
 }
 EXPORT_SYMBOL(tty_port_lower_dtr_rts);
@@ -684,7 +684,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
 
        if (!tty_port_initialized(port)) {
                clear_bit(TTY_IO_ERROR, &tty->flags);
-               if (port->ops->activate) {
+               if (port->ops && port->ops->activate) {
                        int retval = port->ops->activate(port, tty);
                        if (retval) {
                                mutex_unlock(&port->mutex);
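
Every tty_port hunk in this file applies the same hardening: port->ops itself may be NULL, not just an individual method, so each dispatch site checks the table pointer before the function pointer. Sketched as one representative helper:

    /* invoke an optional port operation, doing nothing when absent */
    static void example_dtr_rts(struct tty_port *port, int raise)
    {
            if (port->ops && port->ops->dtr_rts)
                    port->ops->dtr_rts(port, raise);
    }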
index d34984aa646dc4d30813fdfb91290fbef958d0fb..650c66886c80f5d1c9770321949251af17e112a6 100644 (file)
@@ -1520,7 +1520,8 @@ static void csi_J(struct vc_data *vc, int vpar)
                        return;
        }
        scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
-       update_region(vc, (unsigned long) start, count);
+       if (con_should_update(vc))
+               do_update_region(vc, (unsigned long) start, count);
        vc->vc_need_wrap = 0;
 }
 
index 739f8960811ac89d6f960a184155f4e0c602101a..ec666eb4b7b445d98cbc3ff59be63c1b7aa90437 100644 (file)
@@ -558,10 +558,8 @@ static void acm_softint(struct work_struct *work)
                clear_bit(EVENT_RX_STALL, &acm->flags);
        }
 
-       if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) {
+       if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
                tty_port_tty_wakeup(&acm->port);
-               clear_bit(EVENT_TTY_WAKEUP, &acm->flags);
-       }
 }
 
 /*
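
A separate test_bit()/clear_bit() pair leaves a window in which a wakeup flagged after the test is cleared without ever being acted on; test_and_clear_bit() reads and clears the bit as one atomic operation, so an event is either consumed by this pass or left intact for the next. The difference, sketched (handle_wakeup() is hypothetical):

    /* racy: a bit set during handle_wakeup() is wiped by clear_bit() */
    if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) {
            handle_wakeup();
            clear_bit(EVENT_TTY_WAKEUP, &acm->flags);
    }

    /* atomic: consume-or-keep, never lose an event */
    if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
            handle_wakeup();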
index 48277bbc15e4d155fc9c1c7315fcf57d6347b51e..73c8e65917461f8f83d9233c96bdf0d2b8956b27 100644 (file)
@@ -145,6 +145,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
 
        do {
                controller = of_find_node_with_property(controller, "phys");
+               if (!of_device_is_available(controller))
+                       continue;
                index = 0;
                do {
                        if (arg0 == -1) {
index 8987cec9549dd0d7fd75323c405504efb33e296a..ebcadaad89d1dcfa31e65ab439daeada102f5ff5 100644 (file)
@@ -473,11 +473,6 @@ static int usb_unbind_interface(struct device *dev)
                pm_runtime_disable(dev);
        pm_runtime_set_suspended(dev);
 
-       /* Undo any residual pm_autopm_get_interface_* calls */
-       for (r = atomic_read(&intf->pm_usage_cnt); r > 0; --r)
-               usb_autopm_put_interface_no_suspend(intf);
-       atomic_set(&intf->pm_usage_cnt, 0);
-
        if (!error)
                usb_autosuspend_device(udev);
 
@@ -1633,7 +1628,6 @@ void usb_autopm_put_interface(struct usb_interface *intf)
        int                     status;
 
        usb_mark_last_busy(udev);
-       atomic_dec(&intf->pm_usage_cnt);
        status = pm_runtime_put_sync(&intf->dev);
        dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
                        __func__, atomic_read(&intf->dev.power.usage_count),
@@ -1662,7 +1656,6 @@ void usb_autopm_put_interface_async(struct usb_interface *intf)
        int                     status;
 
        usb_mark_last_busy(udev);
-       atomic_dec(&intf->pm_usage_cnt);
        status = pm_runtime_put(&intf->dev);
        dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
                        __func__, atomic_read(&intf->dev.power.usage_count),
@@ -1684,7 +1677,6 @@ void usb_autopm_put_interface_no_suspend(struct usb_interface *intf)
        struct usb_device       *udev = interface_to_usbdev(intf);
 
        usb_mark_last_busy(udev);
-       atomic_dec(&intf->pm_usage_cnt);
        pm_runtime_put_noidle(&intf->dev);
 }
 EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend);
@@ -1715,8 +1707,6 @@ int usb_autopm_get_interface(struct usb_interface *intf)
        status = pm_runtime_get_sync(&intf->dev);
        if (status < 0)
                pm_runtime_put_sync(&intf->dev);
-       else
-               atomic_inc(&intf->pm_usage_cnt);
        dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
                        __func__, atomic_read(&intf->dev.power.usage_count),
                        status);
@@ -1750,8 +1740,6 @@ int usb_autopm_get_interface_async(struct usb_interface *intf)
        status = pm_runtime_get(&intf->dev);
        if (status < 0 && status != -EINPROGRESS)
                pm_runtime_put_noidle(&intf->dev);
-       else
-               atomic_inc(&intf->pm_usage_cnt);
        dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
                        __func__, atomic_read(&intf->dev.power.usage_count),
                        status);
@@ -1775,7 +1763,6 @@ void usb_autopm_get_interface_no_resume(struct usb_interface *intf)
        struct usb_device       *udev = interface_to_usbdev(intf);
 
        usb_mark_last_busy(udev);
-       atomic_inc(&intf->pm_usage_cnt);
        pm_runtime_get_noresume(&intf->dev);
 }
 EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume);
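
These usbcore hunks remove the interface-private pm_usage_cnt: it merely shadowed the runtime-PM counter in intf->dev.power.usage_count, and the deleted loop in usb_unbind_interface() shows it could drift and need manual repair. The driver-facing API is unchanged; balanced usage still looks like this sketch (example_transfer() is hypothetical):

    static int example_do_io(struct usb_interface *intf)
    {
            int ret;

            ret = usb_autopm_get_interface(intf);   /* resume and pin awake */
            if (ret)
                    return ret;

            ret = example_transfer(intf);           /* no autosuspend here */

            usb_autopm_put_interface(intf);         /* unpin, may suspend */
            return ret;
    }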
index 3189181bb628d921309d44296da06eff92b5d1bd..975d7c1288e36534bdd08be7e84642acdba535c7 100644 (file)
@@ -2741,6 +2741,9 @@ int usb_add_hcd(struct usb_hcd *hcd,
 
                retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
                                                  PHY_MODE_USB_HOST_SS);
+               if (retval)
+                       retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
+                                                         PHY_MODE_USB_HOST);
                if (retval)
                        goto err_usb_phy_roothub_power_on;
 
index 82239f27c4ccf822daca19fe2e9c6cffb19933c7..e844bb7b5676a4525724d25883231af474cdd8e0 100644 (file)
@@ -820,9 +820,11 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
 
        if (dev->state == USB_STATE_SUSPENDED)
                return -EHOSTUNREACH;
-       if (size <= 0 || !buf || !index)
+       if (size <= 0 || !buf)
                return -EINVAL;
        buf[0] = 0;
+       if (index <= 0 || index >= 256)
+               return -EINVAL;
        tbuf = kmalloc(256, GFP_NOIO);
        if (!tbuf)
                return -ENOMEM;
index fdc6e4e403e81736db077e0c7cdf212aa6da2874..8cced3609e243b186caedd3eeb79e338c6151a73 100644 (file)
@@ -29,6 +29,7 @@
 #define PCI_DEVICE_ID_INTEL_BXT_M              0x1aaa
 #define PCI_DEVICE_ID_INTEL_APL                        0x5aaa
 #define PCI_DEVICE_ID_INTEL_KBP                        0xa2b0
+#define PCI_DEVICE_ID_INTEL_CMLH               0x02ee
 #define PCI_DEVICE_ID_INTEL_GLK                        0x31aa
 #define PCI_DEVICE_ID_INTEL_CNPLP              0x9dee
 #define PCI_DEVICE_ID_INTEL_CNPH               0xa36e
@@ -305,6 +306,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
          (kernel_ulong_t) &dwc3_pci_mrfld_properties, },
 
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH),
+         (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP),
          (kernel_ulong_t) &dwc3_pci_intel_properties, },
 
index 75b113a5b25cb6af28a8d7776e58678f6fcf7202..f3816a5c861eeeafdf1230afc1e7ca8fe41efa55 100644 (file)
@@ -391,20 +391,20 @@ try_again:
        req->complete = f_hidg_req_complete;
        req->context  = hidg;
 
+       spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+
        status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
        if (status < 0) {
                ERROR(hidg->func.config->cdev,
                        "usb_ep_queue error on int endpoint %zd\n", status);
-               goto release_write_pending_unlocked;
+               goto release_write_pending;
        } else {
                status = count;
        }
-       spin_unlock_irqrestore(&hidg->write_spinlock, flags);
 
        return status;
 release_write_pending:
        spin_lock_irqsave(&hidg->write_spinlock, flags);
-release_write_pending_unlocked:
        hidg->write_pending = 0;
        spin_unlock_irqrestore(&hidg->write_spinlock, flags);
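
The point of the f_hid reshuffle: usb_ep_queue() may invoke the request's completion handler synchronously, and f_hidg_req_complete() takes write_spinlock, so queueing while holding that lock can self-deadlock. The fix queues unlocked and re-takes the lock only to roll back state on failure; the shape, sketched:

    spin_lock_irqsave(&hidg->write_spinlock, flags);
    /* ... claim and fill the request ... */
    spin_unlock_irqrestore(&hidg->write_spinlock, flags);

    status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC); /* may complete now */
    if (status < 0) {
            spin_lock_irqsave(&hidg->write_spinlock, flags);
            hidg->write_pending = 0;        /* roll back under the lock */
            spin_unlock_irqrestore(&hidg->write_spinlock, flags);
    }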
 
index baf72f95f0f1cb38ff443bc802a258b6f04ef899..213b52508621eb591935869b00ac443e62d7b5b4 100644 (file)
@@ -979,8 +979,18 @@ static int dummy_udc_start(struct usb_gadget *g,
        struct dummy_hcd        *dum_hcd = gadget_to_dummy_hcd(g);
        struct dummy            *dum = dum_hcd->dum;
 
-       if (driver->max_speed == USB_SPEED_UNKNOWN)
+       switch (g->speed) {
+       /* All the speeds we support */
+       case USB_SPEED_LOW:
+       case USB_SPEED_FULL:
+       case USB_SPEED_HIGH:
+       case USB_SPEED_SUPER:
+               break;
+       default:
+               dev_err(dummy_dev(dum_hcd), "Unsupported driver max speed %d\n",
+                               driver->max_speed);
                return -EINVAL;
+       }
 
        /*
         * SLAVE side init ... the layer above hardware, which
@@ -1784,9 +1794,10 @@ static void dummy_timer(struct timer_list *t)
                /* Bus speed is 500000 bytes/ms, so use a little less */
                total = 490000;
                break;
-       default:
+       default:        /* Can't happen */
                dev_err(dummy_dev(dum_hcd), "bogus device speed\n");
-               return;
+               total = 0;
+               break;
        }
 
        /* FIXME if HZ != 1000 this will probably misbehave ... */
@@ -1828,7 +1839,7 @@ restart:
 
                /* Used up this frame's bandwidth? */
                if (total <= 0)
-                       break;
+                       continue;
 
                /* find the gadget's ep for this request (if configured) */
                address = usb_pipeendpoint (urb->pipe);
index b77f3126580ebb937986e7ced5b28739dd25dea4..c2011cd7df8cf5fbf0c5a5db153c37a1b0f451a6 100644 (file)
@@ -945,6 +945,7 @@ net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
                        break;
        }
        if (&req->req != _req) {
+               ep->stopped = stopped;
                spin_unlock_irqrestore(&ep->dev->lock, flags);
                return -EINVAL;
        }
index f63f82450bf4e4960414eb80b3020fda6bfa8c3f..898339e5df10d83d211942609a9bd695f199e787 100644 (file)
@@ -866,9 +866,6 @@ static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
        (void) readl(&ep->dev->pci->pcimstctl);
 
        writel(BIT(DMA_START), &dma->dmastat);
-
-       if (!ep->is_in)
-               stop_out_naking(ep);
 }
 
 static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
@@ -907,6 +904,7 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
                        writel(BIT(DMA_START), &dma->dmastat);
                        return;
                }
+               stop_out_naking(ep);
        }
 
        tmp = dmactl_default;
@@ -1275,9 +1273,9 @@ static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
                        break;
        }
        if (&req->req != _req) {
+               ep->stopped = stopped;
                spin_unlock_irqrestore(&ep->dev->lock, flags);
-               dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n",
-                                                               __func__);
+               ep_dbg(ep->dev, "%s: Request mismatch\n", __func__);
                return -EINVAL;
        }
 
index 934584f0a20a7bee30adcee141c0a7eb63a51b3f..6343fbacd2442adea634a9911bed82cf1603c417 100644 (file)
@@ -3204,6 +3204,9 @@ static int __init u132_hcd_init(void)
        printk(KERN_INFO "driver %s\n", hcd_name);
        workqueue = create_singlethread_workqueue("u132");
        retval = platform_driver_register(&u132_platform_driver);
+       if (retval)
+               destroy_workqueue(workqueue);
+
        return retval;
 }
 
index c78be578abb065af0e0715352f8c502345907ad7..d932cc31711e8a0a872efaf35d2ab347fa45106d 100644 (file)
@@ -516,7 +516,6 @@ static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
                return -1;
 
        writel(0, &dbc->regs->control);
-       xhci_dbc_mem_cleanup(xhci);
        dbc->state = DS_DISABLED;
 
        return 0;
@@ -562,8 +561,10 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
        ret = xhci_do_dbc_stop(xhci);
        spin_unlock_irqrestore(&dbc->lock, flags);
 
-       if (!ret)
+       if (!ret) {
+               xhci_dbc_mem_cleanup(xhci);
                pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
+       }
 }
 
 static void
index e2eece6936556b06be37e43a29a3c8554c722203..96a740543183729bb702244151ebb95d88acd97f 100644 (file)
@@ -1545,20 +1545,25 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
        port_index = max_ports;
        while (port_index--) {
                u32 t1, t2;
-
+               int retries = 10;
+retry:
                t1 = readl(ports[port_index]->addr);
                t2 = xhci_port_state_to_neutral(t1);
                portsc_buf[port_index] = 0;
 
-               /* Bail out if a USB3 port has a new device in link training */
-               if ((hcd->speed >= HCD_USB3) &&
+               /*
+                * Give a USB3 port in link training time to finish, but don't
+                * prevent suspend as the port might be stuck
+                */
+               if ((hcd->speed >= HCD_USB3) && retries-- &&
                    (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
-                       bus_state->bus_suspended = 0;
                        spin_unlock_irqrestore(&xhci->lock, flags);
-                       xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
-                       return -EBUSY;
+                       msleep(XHCI_PORT_POLLING_LFPS_TIME);
+                       spin_lock_irqsave(&xhci->lock, flags);
+                       xhci_dbg(xhci, "port %d polling in bus suspend, waiting\n",
+                                port_index);
+                       goto retry;
                }
-
                /* suspend ports in U0, or bail out for new connect changes */
                if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
                        if ((t1 & PORT_CSC) && wake_enabled) {
index a6e4637157799769cc0f77b6b2f815c7c4ad6490..671bce18782c5a788ad1af896ab9066fd4078839 100644 (file)
@@ -246,6 +246,7 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
        if (!xhci_rcar_wait_for_pll_active(hcd))
                return -ETIMEDOUT;
 
+       xhci->quirks |= XHCI_TRUST_TX_LENGTH;
        return xhci_rcar_download_firmware(hcd);
 }
 
index 40fa25c4d0419851bac800bdd74d29bb6f7e0fee..9215a28dad406a724959f0315a0444525d0edb90 100644 (file)
@@ -1647,10 +1647,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
                }
        }
 
-       if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 &&
-                       DEV_SUPERSPEED_ANY(portsc)) {
+       if ((portsc & PORT_PLC) &&
+           DEV_SUPERSPEED_ANY(portsc) &&
+           ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
+            (portsc & PORT_PLS_MASK) == XDEV_U1 ||
+            (portsc & PORT_PLS_MASK) == XDEV_U2)) {
                xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
-               /* We've just brought the device into U0 through either the
+               /* We've just brought the device into U0/1/2 through either the
                 * Resume state after a device remote wakeup, or through the
                 * U3Exit state after a host-initiated resume.  If it's a device
                 * initiated remote wake, don't pass up the link state change,
index 652dc36e30129c9f15a703b640a52c8f2685a82c..9334cdee382a67a8b783b13b9c88a2d1dd4c3da3 100644 (file)
@@ -452,6 +452,14 @@ struct xhci_op_regs {
  */
 #define XHCI_DEFAULT_BESL      4
 
+/*
+ * The USB3 specification defines a 360 ms tPollingLFPSTimeout for USB3 ports
+ * to complete link training. Usually link training completes much faster,
+ * so check status 10 times with a 36 ms sleep in places where we need to
+ * wait for polling to complete.
+ */
+#define XHCI_PORT_POLLING_LFPS_TIME  36
+
 /**
  * struct xhci_intr_reg - Interrupt Register Set
  * @irq_pending:       IMAN - Interrupt Management Register.  Used to enable
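
Together with the xhci-hub.c hunk above, this bounds the wait for link training: rather than failing bus suspend whenever a USB3 port sits in XDEV_POLLING, the host retries up to 10 times, sleeping one 36 ms slice of the 360 ms budget per round with the xHCI lock dropped, then suspends regardless so a stuck port cannot wedge the bus. The loop shape, sketched (port_is_polling() is a stand-in for the PORT_PLS_MASK test):

    int retries = 10;

    while (port_is_polling(port) && retries--) {
            spin_unlock_irqrestore(&xhci->lock, flags); /* never sleep locked */
            msleep(XHCI_PORT_POLLING_LFPS_TIME);        /* 36 ms per attempt */
            spin_lock_irqsave(&xhci->lock, flags);
    }
    /* continue with suspend whether or not training finished */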
index 4d72b7d1d383be2643d09756f11f709a8b1906a5..04684849d68320862a4de40d5de5e5db4487cd7e 100644 (file)
@@ -547,7 +547,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
         */
        hub->port_swap = USB251XB_DEF_PORT_SWAP;
        of_property_for_each_u32(np, "swap-dx-lanes", prop, p, port) {
-               if ((port >= 0) && (port <= data->port_cnt))
+               if (port <= data->port_cnt)
                        hub->port_swap |= BIT(port);
        }
 
@@ -612,7 +612,7 @@ static int usb251xb_probe(struct usb251xb *hub)
                                                           dev);
        int err;
 
-       if (np) {
+       if (np && of_id) {
                err = usb251xb_get_ofdata(hub,
                                          (struct usb251xb_data *)of_id->data);
                if (err) {
index 6d9fd5f649036e8fb47c39eaeffa26f856724e99..7b306aa22d2589518d696111cb2750bdc3bed4c0 100644 (file)
@@ -314,6 +314,7 @@ static void yurex_disconnect(struct usb_interface *interface)
        usb_deregister_dev(interface, &yurex_class);
 
        /* prevent more I/O from starting */
+       usb_poison_urb(dev->urb);
        mutex_lock(&dev->io_mutex);
        dev->interface = NULL;
        mutex_unlock(&dev->io_mutex);
index bcc23486c4ed2813da698e14faf0e11366577afb..928c2cd6fc0084ef0feb7f79edf6d577e8fa5a46 100644 (file)
@@ -6,6 +6,7 @@ config USB_MTU3
        tristate "MediaTek USB3 Dual Role controller"
        depends on USB || USB_GADGET
        depends on ARCH_MEDIATEK || COMPILE_TEST
+       depends on EXTCON || !EXTCON
        select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD
        help
          Say Y or M here if your system runs on MediaTek SoCs with
index fffe23ab0189a00b1a7747662c9248cfd41770ae..979bef9bfb6bc7189e2c16d8dc00ae4ae82d4854 100644 (file)
@@ -80,6 +80,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */
        { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
        { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
+       { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */
        { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
        { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
        { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
index 8f5b1747175945f8830a909803acdc10b48f7de4..1d8461ae2c340324f64c7796a5be644037f84ddb 100644 (file)
@@ -609,6 +609,8 @@ static const struct usb_device_id id_table_combined[] = {
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
index b863bedb55a138b7a99abe0a82aab39e10b79e3f..5755f0df002589403366a75acec50a40fc86b955 100644 (file)
 /*
  * NovaTech product ids (FTDI_VID)
  */
-#define FTDI_NT_ORIONLXM_PID   0x7c90  /* OrionLXm Substation Automation Platform */
+#define FTDI_NT_ORIONLXM_PID           0x7c90  /* OrionLXm Substation Automation Platform */
+#define FTDI_NT_ORIONLX_PLUS_PID       0x7c91  /* OrionLX+ Substation Automation Platform */
+#define FTDI_NT_ORION_IO_PID           0x7c92  /* Orion I/O */
 
 /*
  * Synapse Wireless product ids (FTDI_VID)
index fc52ac75fbf66f0f2e7ac2f8951b8df6ea2ba49f..18110225d50606abaefe2e0c90490ffff0888f41 100644 (file)
@@ -366,8 +366,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
        if (!urbtrack)
                return -ENOMEM;
 
-       kref_get(&mos_parport->ref_count);
-       urbtrack->mos_parport = mos_parport;
        urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
        if (!urbtrack->urb) {
                kfree(urbtrack);
@@ -388,6 +386,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
                             usb_sndctrlpipe(usbdev, 0),
                             (unsigned char *)urbtrack->setup,
                             NULL, 0, async_complete, urbtrack);
+       kref_get(&mos_parport->ref_count);
+       urbtrack->mos_parport = mos_parport;
        kref_init(&urbtrack->ref_count);
        INIT_LIST_HEAD(&urbtrack->urblist_entry);
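
The mos7720 fix is pure ordering: take the kref on mos_parport only after the last allocation that can fail, so the early kfree() error paths never owe a reference drop. The rule, sketched:

    urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
    if (!urbtrack->urb) {
            kfree(urbtrack);
            return -ENOMEM;     /* no reference taken yet, nothing to undo */
    }
    /* ... any further fallible setup ... */

    /* only now, with no failure path left, pin the parport */
    kref_get(&mos_parport->ref_count);
    urbtrack->mos_parport = mos_parport;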
 
index 11b21d9410f35306d299339d7a29554aa9cf47e3..83869065b8022ba68b145db6756cc6f2f9e3b941 100644 (file)
@@ -246,6 +246,7 @@ static void option_instat_callback(struct urb *urb);
 #define QUECTEL_PRODUCT_EC25                   0x0125
 #define QUECTEL_PRODUCT_BG96                   0x0296
 #define QUECTEL_PRODUCT_EP06                   0x0306
+#define QUECTEL_PRODUCT_EM12                   0x0512
 
 #define CMOTECH_VENDOR_ID                      0x16d8
 #define CMOTECH_PRODUCT_6001                   0x6001
@@ -1066,7 +1067,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(3) },
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
-       { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
+         .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
        /* Quectel products using Qualcomm vendor ID */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
        { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
@@ -1087,6 +1089,9 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
          .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
+         .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1940,10 +1945,12 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(4) },
        { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),                     /* D-Link DWM-222 */
          .driver_info = RSVD(4) },
-       { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
-       { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
-       { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
-       { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) },    /* D-Link DWM-152/C1 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) },    /* D-Link DWM-156/C1 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) },    /* D-Link DWM-156/A3 */
+       { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff),                     /* Olicard 600 */
+         .driver_info = RSVD(4) },
+       { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                   /* OLICARD300 - MT6225 */
        { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
        { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
index 31b0244419387c52ec5dcd13138b4d6a0bd063f7..cc794e25a0b6ed043149685eb1400492a977b2c3 100644 (file)
@@ -763,18 +763,16 @@ static void rts51x_suspend_timer_fn(struct timer_list *t)
                break;
        case RTS51X_STAT_IDLE:
        case RTS51X_STAT_SS:
-               usb_stor_dbg(us, "RTS51X_STAT_SS, intf->pm_usage_cnt:%d, power.usage:%d\n",
-                            atomic_read(&us->pusb_intf->pm_usage_cnt),
+               usb_stor_dbg(us, "RTS51X_STAT_SS, power.usage:%d\n",
                             atomic_read(&us->pusb_intf->dev.power.usage_count));
 
-               if (atomic_read(&us->pusb_intf->pm_usage_cnt) > 0) {
+               if (atomic_read(&us->pusb_intf->dev.power.usage_count) > 0) {
                        usb_stor_dbg(us, "Ready to enter SS state\n");
                        rts51x_set_stat(chip, RTS51X_STAT_SS);
                        /* ignore mass storage interface's children */
                        pm_suspend_ignore_children(&us->pusb_intf->dev, true);
                        usb_autopm_put_interface_async(us->pusb_intf);
-                       usb_stor_dbg(us, "RTS51X_STAT_SS 01, intf->pm_usage_cnt:%d, power.usage:%d\n",
-                                    atomic_read(&us->pusb_intf->pm_usage_cnt),
+                       usb_stor_dbg(us, "RTS51X_STAT_SS 01, power.usage:%d\n",
                                     atomic_read(&us->pusb_intf->dev.power.usage_count));
                }
                break;
@@ -807,11 +805,10 @@ static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
        int ret;
 
        if (working_scsi(srb)) {
-               usb_stor_dbg(us, "working scsi, intf->pm_usage_cnt:%d, power.usage:%d\n",
-                            atomic_read(&us->pusb_intf->pm_usage_cnt),
+               usb_stor_dbg(us, "working scsi, power.usage:%d\n",
                             atomic_read(&us->pusb_intf->dev.power.usage_count));
 
-               if (atomic_read(&us->pusb_intf->pm_usage_cnt) <= 0) {
+               if (atomic_read(&us->pusb_intf->dev.power.usage_count) <= 0) {
                        ret = usb_autopm_get_interface(us->pusb_intf);
                        usb_stor_dbg(us, "working scsi, ret=%d\n", ret);
                }
index 0f62db091d8dab59416fb70af97a44a127503d05..a2233d72ae7c9a919d86f4e0bc39ea26d6cc6aba 100644 (file)
@@ -37,6 +37,7 @@
        S(SRC_ATTACHED),                        \
        S(SRC_STARTUP),                         \
        S(SRC_SEND_CAPABILITIES),               \
+       S(SRC_SEND_CAPABILITIES_TIMEOUT),       \
        S(SRC_NEGOTIATE_CAPABILITIES),          \
        S(SRC_TRANSITION_SUPPLY),               \
        S(SRC_READY),                           \
@@ -2966,10 +2967,34 @@ static void run_state_machine(struct tcpm_port *port)
                        /* port->hard_reset_count = 0; */
                        port->caps_count = 0;
                        port->pd_capable = true;
-                       tcpm_set_state_cond(port, hard_reset_state(port),
+                       tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
                                            PD_T_SEND_SOURCE_CAP);
                }
                break;
+       case SRC_SEND_CAPABILITIES_TIMEOUT:
+               /*
+                * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
+                *
+                * PD 2.0 sinks are supposed to accept src-capabilities with a
+                * 3.0 header and simply ignore any src PDOs which the sink does
+                * not understand, such as PPS, but some 2.0 sinks instead ignore
+                * the entire PD_DATA_SOURCE_CAP message, causing contract
+                * negotiation to fail.
+                *
+                * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
+                * sending src-capabilities with a lower PD revision to
+                * make these broken sinks work.
+                */
+               if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
+                       tcpm_set_state(port, HARD_RESET_SEND, 0);
+               } else if (port->negotiated_rev > PD_REV20) {
+                       port->negotiated_rev--;
+                       port->hard_reset_count = 0;
+                       tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
+               } else {
+                       tcpm_set_state(port, hard_reset_state(port), 0);
+               }
+               break;
        case SRC_NEGOTIATE_CAPABILITIES:
                ret = tcpm_pd_check_request(port);
                if (ret < 0) {
index 423208e19383c0c2cd414d3b627b8f4c48b6f67b..6770afd4076548eeb0021eef062160013b97b1ea 100644 (file)
@@ -615,8 +615,13 @@ static int wcove_typec_probe(struct platform_device *pdev)
        wcove->dev = &pdev->dev;
        wcove->regmap = pmic->regmap;
 
-       irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr,
-                                 platform_get_irq(pdev, 0));
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+               return irq;
+       }
+
+       irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq);
        if (irq < 0)
                return irq;
 
index 97b09a42a10cabe1080f606acba13d35d543cf91..dbfb2f24d71ea4dd974e891d5aed8bea56d793c5 100644 (file)
@@ -361,16 +361,10 @@ static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
        }
 
        if (usb_endpoint_xfer_isoc(epd)) {
-               /* validate packet size and number of packets */
-               unsigned int maxp, packets, bytes;
-
-               maxp = usb_endpoint_maxp(epd);
-               maxp *= usb_endpoint_maxp_mult(epd);
-               bytes = pdu->u.cmd_submit.transfer_buffer_length;
-               packets = DIV_ROUND_UP(bytes, maxp);
-
+               /* validate number of packets */
                if (pdu->u.cmd_submit.number_of_packets < 0 ||
-                   pdu->u.cmd_submit.number_of_packets > packets) {
+                   pdu->u.cmd_submit.number_of_packets >
+                   USBIP_MAX_ISO_PACKETS) {
                        dev_err(&sdev->udev->dev,
                                "CMD_SUBMIT: isoc invalid num packets %d\n",
                                pdu->u.cmd_submit.number_of_packets);
index bf8afe9b5883850325fb70fc3873bff20237040b..8be857a4fa132fc1e48d86bfb1f08f9f4fef7202 100644 (file)
@@ -121,6 +121,13 @@ extern struct device_attribute dev_attr_usbip_debug;
 #define USBIP_DIR_OUT  0x00
 #define USBIP_DIR_IN   0x01
 
+/*
+ * Arbitrary limit for the maximum number of isochronous packets in an URB,
+ * see, for example, the uhci_submit_isochronous() function in
+ * drivers/usb/host/uhci-q.c
+ */
+#define USBIP_MAX_ISO_PACKETS 1024
+
 /**
  * struct usbip_header_basic - data pertinent to every request
  * @command: the usbip request type
index a25659b5a5d17d97272589a2dd1ac7979ed55070..3fa20e95a6bb6446fb2c4aa3d71abf75b611ce33 100644 (file)
@@ -1661,11 +1661,11 @@ static void __init vfio_pci_fill_ids(void)
                rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
                                   subvendor, subdevice, class, class_mask, 0);
                if (rc)
-                       pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
+                       pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
                                vendor, device, subvendor, subdevice,
                                class, class_mask, rc);
                else
-                       pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
+                       pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
                                vendor, device, subvendor, subdevice,
                                class, class_mask);
        }
index 8dbb270998f47121dc0886151cb6e47da8a8e211..6b64e45a52691ffd9dd4809d0e127fc8c450cf80 100644 (file)
@@ -1398,7 +1398,7 @@ unlock_exit:
        mutex_unlock(&container->lock);
 }
 
-const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
+static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
        .name           = "iommu-vfio-powerpc",
        .owner          = THIS_MODULE,
        .open           = tce_iommu_open,
index 73652e21efec6a28393bd979d2d42caef711b280..d0f731c9920a65a44d614181ecf3a4e4c2d90755 100644 (file)
@@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
 MODULE_PARM_DESC(disable_hugepages,
                 "Disable VFIO IOMMU support for IOMMU hugepages.");
 
+static unsigned int dma_entry_limit __read_mostly = U16_MAX;
+module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
+MODULE_PARM_DESC(dma_entry_limit,
+                "Maximum number of user DMA mappings per container (default: 65535).");
+
 struct vfio_iommu {
        struct list_head        domain_list;
        struct vfio_domain      *external_domain; /* domain for external user */
        struct mutex            lock;
        struct rb_root          dma_list;
        struct blocking_notifier_head notifier;
+       unsigned int            dma_avail;
        bool                    v2;
        bool                    nesting;
 };
@@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
        vfio_unlink_dma(iommu, dma);
        put_task_struct(dma->task);
        kfree(dma);
+       iommu->dma_avail++;
 }
 
 static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
@@ -1081,12 +1088,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
                goto out_unlock;
        }
 
+       if (!iommu->dma_avail) {
+               ret = -ENOSPC;
+               goto out_unlock;
+       }
+
        dma = kzalloc(sizeof(*dma), GFP_KERNEL);
        if (!dma) {
                ret = -ENOMEM;
                goto out_unlock;
        }
 
+       iommu->dma_avail--;
        dma->iova = iova;
        dma->vaddr = vaddr;
        dma->prot = prot;
@@ -1583,6 +1596,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
 
        INIT_LIST_HEAD(&iommu->domain_list);
        iommu->dma_list = RB_ROOT;
+       iommu->dma_avail = dma_entry_limit;
        mutex_init(&iommu->lock);
        BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
 
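
Taken together, the three type1 hunks above implement a simple slot counter: dma_avail starts at dma_entry_limit, each successful mapping consumes a slot, each removal returns one, and exhaustion surfaces as -ENOSPC. A compact userspace sketch of the same accounting under a lock (a pthread mutex standing in for iommu->lock):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    struct container {
            pthread_mutex_t lock;
            unsigned int dma_avail;         /* remaining mapping slots */
    };

    static int do_map(struct container *c)
    {
            int ret = 0;

            pthread_mutex_lock(&c->lock);
            if (!c->dma_avail)
                    ret = -ENOSPC;          /* limit reached: refuse */
            else
                    c->dma_avail--;         /* consume a slot */
            pthread_mutex_unlock(&c->lock);
            return ret;
    }

    static void do_unmap(struct container *c)
    {
            pthread_mutex_lock(&c->lock);
            c->dma_avail++;                 /* return the slot on removal */
            pthread_mutex_unlock(&c->lock);
    }

    int main(void)
    {
            struct container c = { PTHREAD_MUTEX_INITIALIZER, 2 };

            printf("%d %d %d\n", do_map(&c), do_map(&c), do_map(&c));
            do_unmap(&c);
            printf("%d\n", do_map(&c));     /* a freed slot can be reused */
            return 0;
    }
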
index 5ace833de74620bf1a089186057d766e7e3def63..351af88231ada1145bfb72326f905bfaac3819ca 100644 (file)
@@ -911,8 +911,12 @@ static int vhost_new_umem_range(struct vhost_umem *umem,
                                u64 start, u64 size, u64 end,
                                u64 userspace_addr, int perm)
 {
-       struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
+       struct vhost_umem_node *tmp, *node;
 
+       if (!size)
+               return -EFAULT;
+
+       node = kmalloc(sizeof(*node), GFP_ATOMIC);
        if (!node)
                return -ENOMEM;
 
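
One plausible reading of the vhost check above: with size == 0 the range's inclusive end, start + size - 1, wraps around, so an empty range is rejected before it can reach any interval-tree bookkeeping. A tiny sketch of the guard (userspace, illustrative names):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* A zero-sized range would make the inclusive end, start + size - 1,
     * wrap around; refuse it before doing anything with the range. */
    static int new_range(uint64_t start, uint64_t size)
    {
            uint64_t last;

            if (!size)
                    return -EFAULT;

            last = start + size - 1;
            printf("range [%llu, %llu]\n",
                   (unsigned long long)start, (unsigned long long)last);
            return 0;
    }

    int main(void)
    {
            new_range(4096, 4096);
            printf("%d\n", new_range(4096, 0));     /* -14 (-EFAULT): rejected */
            return 0;
    }
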
index ba906876cc454f5e67865ad7af69ee3b37f5f059..9e529cc2b4ffd1bee145ef1584ed7ff546ba0a8b 100644 (file)
@@ -464,7 +464,8 @@ static int efifb_probe(struct platform_device *dev)
        info->apertures->ranges[0].base = efifb_fix.smem_start;
        info->apertures->ranges[0].size = size_remap;
 
-       if (!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
+       if (efi_enabled(EFI_BOOT) &&
+           !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
                if ((efifb_fix.smem_start + efifb_fix.smem_len) >
                    (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
                        pr_err("efifb: video memory @ 0x%lx spans multiple EFI memory regions\n",
index df7d09409efe3a9512495b6c718ba0bbbebb39b3..8ca333f21292ee7dcb611591aed0e6f03421341b 100644 (file)
 
 #define GUEST_MAPPINGS_TRIES   5
 
+#define VBG_KERNEL_REQUEST \
+       (VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
+        VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
+
 /**
  * Reserves memory in which the VMM can relocate any guest mappings
  * that are floating around.
@@ -48,7 +52,8 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev)
        int i, rc;
 
        /* Query the required space. */
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return;
 
@@ -135,7 +140,8 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
         * Tell the host that we're going to free the memory we reserved for
         * it, then free it up. (Leak the memory if anything goes wrong here.)
         */
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return;
 
@@ -172,8 +178,10 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
        struct vmmdev_guest_info2 *req2 = NULL;
        int rc, ret = -ENOMEM;
 
-       req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO);
-       req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2);
+       req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
+                            VBG_KERNEL_REQUEST);
+       req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
+                            VBG_KERNEL_REQUEST);
        if (!req1 || !req2)
                goto out_free;
 
@@ -187,8 +195,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
        req2->additions_minor = VBG_VERSION_MINOR;
        req2->additions_build = VBG_VERSION_BUILD;
        req2->additions_revision = VBG_SVN_REV;
-       /* (no features defined yet) */
-       req2->additions_features = 0;
+       req2->additions_features =
+               VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
        strlcpy(req2->name, VBG_VERSION_STRING,
                sizeof(req2->name));
 
@@ -230,7 +238,8 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
        struct vmmdev_guest_status *req;
        int rc;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
@@ -423,7 +432,8 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
        struct vmmdev_heartbeat *req;
        int rc;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
@@ -457,7 +467,8 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
 
        gdev->guest_heartbeat_req = vbg_req_alloc(
                                        sizeof(*gdev->guest_heartbeat_req),
-                                       VMMDEVREQ_GUEST_HEARTBEAT);
+                                       VMMDEVREQ_GUEST_HEARTBEAT,
+                                       VBG_KERNEL_REQUEST);
        if (!gdev->guest_heartbeat_req)
                return -ENOMEM;
 
@@ -528,7 +539,8 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
        struct vmmdev_mask *req;
        int rc;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
@@ -567,8 +579,14 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev,
        u32 changed, previous;
        int rc, ret = 0;
 
-       /* Allocate a request buffer before taking the spinlock */
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+       /*
+        * Allocate a request buffer before taking the spinlock; when
+        * the session is being terminated the requestor is the kernel,
+        * as we're cleaning up.
+        */
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
+                           session_termination ? VBG_KERNEL_REQUEST :
+                                                 session->requestor);
        if (!req) {
                if (!session_termination)
                        return -ENOMEM;
@@ -627,7 +645,8 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
        struct vmmdev_mask *req;
        int rc;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
@@ -662,8 +681,14 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
        u32 changed, previous;
        int rc, ret = 0;
 
-       /* Allocate a request buffer before taking the spinlock */
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+       /*
+        * Allocate a request buffer before taking the spinlock; when
+        * the session is being terminated the requestor is the kernel,
+        * as we're cleaning up.
+        */
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+                           session_termination ? VBG_KERNEL_REQUEST :
+                                                 session->requestor);
        if (!req) {
                if (!session_termination)
                        return -ENOMEM;
@@ -722,7 +747,8 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
        struct vmmdev_host_version *req;
        int rc, ret;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
@@ -783,19 +809,24 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
 
        gdev->mem_balloon.get_req =
                vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
-                             VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
+                             VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
+                             VBG_KERNEL_REQUEST);
        gdev->mem_balloon.change_req =
                vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
-                             VMMDEVREQ_CHANGE_MEMBALLOON);
+                             VMMDEVREQ_CHANGE_MEMBALLOON,
+                             VBG_KERNEL_REQUEST);
        gdev->cancel_req =
                vbg_req_alloc(sizeof(*(gdev->cancel_req)),
-                             VMMDEVREQ_HGCM_CANCEL2);
+                             VMMDEVREQ_HGCM_CANCEL2,
+                             VBG_KERNEL_REQUEST);
        gdev->ack_events_req =
                vbg_req_alloc(sizeof(*gdev->ack_events_req),
-                             VMMDEVREQ_ACKNOWLEDGE_EVENTS);
+                             VMMDEVREQ_ACKNOWLEDGE_EVENTS,
+                             VBG_KERNEL_REQUEST);
        gdev->mouse_status_req =
                vbg_req_alloc(sizeof(*gdev->mouse_status_req),
-                             VMMDEVREQ_GET_MOUSE_STATUS);
+                             VMMDEVREQ_GET_MOUSE_STATUS,
+                             VBG_KERNEL_REQUEST);
 
        if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
            !gdev->cancel_req || !gdev->ack_events_req ||
@@ -892,9 +923,9 @@ void vbg_core_exit(struct vbg_dev *gdev)
  * vboxguest_linux.c calls this when userspace opens the char-device.
  * Return: A pointer to the new session or an ERR_PTR on error.
  * @gdev:              The Guest extension device.
- * @user:              Set if this is a session for the vboxuser device.
+ * @requestor:         VMMDEV_REQUESTOR_* flags
  */
-struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
 {
        struct vbg_session *session;
 
@@ -903,7 +934,7 @@ struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
                return ERR_PTR(-ENOMEM);
 
        session->gdev = gdev;
-       session->user_session = user;
+       session->requestor = requestor;
 
        return session;
 }
@@ -924,7 +955,9 @@ void vbg_core_close_session(struct vbg_session *session)
                if (!session->hgcm_client_ids[i])
                        continue;
 
-               vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
+               /* requestor is kernel here, as we're cleaning up. */
+               vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
+                                   session->hgcm_client_ids[i], &rc);
        }
 
        kfree(session);
@@ -1152,7 +1185,8 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
                return -EPERM;
        }
 
-       if (trusted_apps_only && session->user_session) {
+       if (trusted_apps_only &&
+           (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
                vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
                        req->request_type);
                return -EPERM;
@@ -1209,8 +1243,8 @@ static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
        if (i >= ARRAY_SIZE(session->hgcm_client_ids))
                return -EMFILE;
 
-       ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
-                              &conn->hdr.rc);
+       ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
+                              &client_id, &conn->hdr.rc);
 
        mutex_lock(&gdev->session_mutex);
        if (ret == 0 && conn->hdr.rc >= 0) {
@@ -1251,7 +1285,8 @@ static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
        if (i >= ARRAY_SIZE(session->hgcm_client_ids))
                return -EINVAL;
 
-       ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);
+       ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
+                                 &disconn->hdr.rc);
 
        mutex_lock(&gdev->session_mutex);
        if (ret == 0 && disconn->hdr.rc >= 0)
@@ -1313,12 +1348,12 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
        }
 
        if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
-               ret = vbg_hgcm_call32(gdev, client_id,
+               ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
                                      call->function, call->timeout_ms,
                                      VBG_IOCTL_HGCM_CALL_PARMS32(call),
                                      call->parm_count, &call->hdr.rc);
        else
-               ret = vbg_hgcm_call(gdev, client_id,
+               ret = vbg_hgcm_call(gdev, session->requestor, client_id,
                                    call->function, call->timeout_ms,
                                    VBG_IOCTL_HGCM_CALL_PARMS(call),
                                    call->parm_count, &call->hdr.rc);
@@ -1408,6 +1443,7 @@ static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
 }
 
 static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
+                                    struct vbg_session *session,
                                     struct vbg_ioctl_write_coredump *dump)
 {
        struct vmmdev_write_core_dump *req;
@@ -1415,7 +1451,8 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
        if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
                return -EINVAL;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
+                           session->requestor);
        if (!req)
                return -ENOMEM;
 
@@ -1476,7 +1513,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
        case VBG_IOCTL_CHECK_BALLOON:
                return vbg_ioctl_check_balloon(gdev, data);
        case VBG_IOCTL_WRITE_CORE_DUMP:
-               return vbg_ioctl_write_core_dump(gdev, data);
+               return vbg_ioctl_write_core_dump(gdev, session, data);
        }
 
        /* Variable sized requests. */
@@ -1508,7 +1545,8 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
        struct vmmdev_mouse_status *req;
        int rc;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
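
A pattern repeated throughout the vboxguest diff above: every request allocation now names its requestor, with kernel-initiated requests passing the fixed VBG_KERNEL_REQUEST mask and session-driven requests forwarding session->requestor, except during teardown, where the kernel mask is used because the cleanup is no longer on the user's behalf. A hedged sketch of that selection (the flag values below are made up for illustration; the real VMMDEV_REQUESTOR_* constants come from the vmmdev ABI headers):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative flag values only; the real VMMDEV_REQUESTOR_* constants
     * are defined by the VirtualBox vmmdev ABI. */
    #define REQ_KERNEL     0x01u
    #define REQ_USERMODE   0x02u
    #define REQ_USR_ROOT   0x04u

    #define KERNEL_REQUEST (REQ_KERNEL | REQ_USR_ROOT)

    struct session { uint32_t requestor; };

    /* Normal operation acts with the session's identity; teardown work is
     * the kernel's own, so it uses the fixed kernel mask. */
    static uint32_t pick_requestor(const struct session *s, int terminating)
    {
            return terminating ? KERNEL_REQUEST : s->requestor;
    }

    int main(void)
    {
            struct session s = { REQ_USERMODE };

            printf("%#x %#x\n", (unsigned)pick_requestor(&s, 0),
                   (unsigned)pick_requestor(&s, 1));
            return 0;
    }
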
index 7ad9ec45bfa9d649627f45e9410aebff43cd22c7..4188c12b839f7e74f845cc9524c1917b188dae95 100644 (file)
@@ -154,15 +154,15 @@ struct vbg_session {
         * host. Protected by vbg_gdev.session_mutex.
         */
        u32 guest_caps;
-       /** Does this session belong to a root process or a user one? */
-       bool user_session;
+       /** VMMDEV_REQUESTOR_* flags */
+       u32 requestor;
        /** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */
        bool cancel_waiters;
 };
 
 int  vbg_core_init(struct vbg_dev *gdev, u32 fixed_events);
 void vbg_core_exit(struct vbg_dev *gdev);
-struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user);
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor);
 void vbg_core_close_session(struct vbg_session *session);
 int  vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data);
 int  vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features);
@@ -172,12 +172,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
 void vbg_linux_mouse_event(struct vbg_dev *gdev);
 
 /* Private (non-exported) functions from vboxguest_utils.c */
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
+                   u32 requestor);
 void vbg_req_free(void *req, size_t len);
 int vbg_req_perform(struct vbg_dev *gdev, void *req);
 int vbg_hgcm_call32(
-       struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
-       struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
-       int *vbox_status);
+       struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
+       u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
+       u32 parm_count, int *vbox_status);
 
 #endif
index 6e2a9619192d2317f8f449fbb5f9c24d0699e3f8..6e8c0f1c1056296e983fd70af5de7c405392c3ee 100644 (file)
@@ -5,6 +5,7 @@
  * Copyright (C) 2006-2016 Oracle Corporation
  */
 
+#include <linux/cred.h>
 #include <linux/input.h>
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
@@ -28,6 +29,23 @@ static DEFINE_MUTEX(vbg_gdev_mutex);
 /** Global vbg_gdev pointer used by vbg_get/put_gdev. */
 static struct vbg_dev *vbg_gdev;
 
+static u32 vbg_misc_device_requestor(struct inode *inode)
+{
+       u32 requestor = VMMDEV_REQUESTOR_USERMODE |
+                       VMMDEV_REQUESTOR_CON_DONT_KNOW |
+                       VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;
+
+       if (from_kuid(current_user_ns(), current->cred->uid) == 0)
+               requestor |= VMMDEV_REQUESTOR_USR_ROOT;
+       else
+               requestor |= VMMDEV_REQUESTOR_USR_USER;
+
+       if (in_egroup_p(inode->i_gid))
+               requestor |= VMMDEV_REQUESTOR_GRP_VBOX;
+
+       return requestor;
+}
+
 static int vbg_misc_device_open(struct inode *inode, struct file *filp)
 {
        struct vbg_session *session;
@@ -36,7 +54,7 @@ static int vbg_misc_device_open(struct inode *inode, struct file *filp)
        /* misc_open sets filp->private_data to our misc device */
        gdev = container_of(filp->private_data, struct vbg_dev, misc_device);
 
-       session = vbg_core_open_session(gdev, false);
+       session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode));
        if (IS_ERR(session))
                return PTR_ERR(session);
 
@@ -53,7 +71,8 @@ static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
        gdev = container_of(filp->private_data, struct vbg_dev,
                            misc_device_user);
 
-       session = vbg_core_open_session(gdev, false);
+       session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode) |
+                                             VMMDEV_REQUESTOR_USER_DEVICE);
        if (IS_ERR(session))
                return PTR_ERR(session);
 
@@ -115,7 +134,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
                         req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
 
        if (is_vmmdev_req)
-               buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT);
+               buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT,
+                                   session->requestor);
        else
                buf = kmalloc(size, GFP_KERNEL);
        if (!buf)
index bf4474214b4d31bb708c3d9c302d6ce415e17c7a..75fd140b02ff8aa41816a1f0284a4926c214df7e 100644 (file)
@@ -62,7 +62,8 @@ VBG_LOG(vbg_err, pr_err);
 VBG_LOG(vbg_debug, pr_debug);
 #endif
 
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
+                   u32 requestor)
 {
        struct vmmdev_request_header *req;
        int order = get_order(PAGE_ALIGN(len));
@@ -78,7 +79,7 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
        req->request_type = req_type;
        req->rc = VERR_GENERAL_FAILURE;
        req->reserved1 = 0;
-       req->reserved2 = 0;
+       req->requestor = requestor;
 
        return req;
 }
@@ -119,7 +120,7 @@ static bool hgcm_req_done(struct vbg_dev *gdev,
        return done;
 }
 
-int vbg_hgcm_connect(struct vbg_dev *gdev,
+int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
                     struct vmmdev_hgcm_service_location *loc,
                     u32 *client_id, int *vbox_status)
 {
@@ -127,7 +128,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
        int rc;
 
        hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
-                                    VMMDEVREQ_HGCM_CONNECT);
+                                    VMMDEVREQ_HGCM_CONNECT, requestor);
        if (!hgcm_connect)
                return -ENOMEM;
 
@@ -153,13 +154,15 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
 }
 EXPORT_SYMBOL(vbg_hgcm_connect);
 
-int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
+                       u32 client_id, int *vbox_status)
 {
        struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
        int rc;
 
        hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
-                                       VMMDEVREQ_HGCM_DISCONNECT);
+                                       VMMDEVREQ_HGCM_DISCONNECT,
+                                       requestor);
        if (!hgcm_disconnect)
                return -ENOMEM;
 
@@ -593,9 +596,10 @@ static int hgcm_call_copy_back_result(
        return 0;
 }
 
-int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
-                 u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
-                 u32 parm_count, int *vbox_status)
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+                 u32 function, u32 timeout_ms,
+                 struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
+                 int *vbox_status)
 {
        struct vmmdev_hgcm_call *call;
        void **bounce_bufs = NULL;
@@ -615,7 +619,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
                goto free_bounce_bufs;
        }
 
-       call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL);
+       call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
        if (!call) {
                ret = -ENOMEM;
                goto free_bounce_bufs;
@@ -647,9 +651,9 @@ EXPORT_SYMBOL(vbg_hgcm_call);
 
 #ifdef CONFIG_COMPAT
 int vbg_hgcm_call32(
-       struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
-       struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
-       int *vbox_status)
+       struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
+       u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
+       u32 parm_count, int *vbox_status)
 {
        struct vmmdev_hgcm_function_parameter *parm64 = NULL;
        u32 i, size;
@@ -689,7 +693,7 @@ int vbg_hgcm_call32(
                        goto out_free;
        }
 
-       ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms,
+       ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
                            parm64, parm_count, vbox_status);
        if (ret < 0)
                goto out_free;
index 77f0c8f8a23112f1d3c16b237514237767bab398..84834dad38d5c431d161607989080e758bb62b7b 100644 (file)
@@ -9,11 +9,10 @@
 #ifndef __VBOX_VERSION_H__
 #define __VBOX_VERSION_H__
 
-/* Last synced October 4th 2017 */
-#define VBG_VERSION_MAJOR 5
-#define VBG_VERSION_MINOR 2
+#define VBG_VERSION_MAJOR 6
+#define VBG_VERSION_MINOR 0
 #define VBG_VERSION_BUILD 0
-#define VBG_SVN_REV 68940
-#define VBG_VERSION_STRING "5.2.0"
+#define VBG_SVN_REV 127566
+#define VBG_VERSION_STRING "6.0.0"
 
 #endif
index 5e2ae978935de3630cfda2184534bb838620ea6f..6337b8d75d960bdefc5c8119a187d185ff918dfb 100644 (file)
@@ -98,8 +98,8 @@ struct vmmdev_request_header {
        s32 rc;
        /** Reserved field no.1. MBZ. */
        u32 reserved1;
-       /** Reserved field no.2. MBZ. */
-       u32 reserved2;
+       /** IN: Requestor information (VMMDEV_REQUESTOR_*) */
+       u32 requestor;
 };
 VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24);
 
@@ -247,6 +247,8 @@ struct vmmdev_guest_info {
 };
 VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8);
 
+#define VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO   BIT(0)
+
 /** struct vmmdev_guestinfo2 - Guest information report, version 2. */
 struct vmmdev_guest_info2 {
        /** Header. */
@@ -259,7 +261,7 @@ struct vmmdev_guest_info2 {
        u32 additions_build;
        /** SVN revision. */
        u32 additions_revision;
-       /** Feature mask, currently unused. */
+       /** Feature mask. */
        u32 additions_features;
        /**
         * The intentional meaning of this field was:
index d0584c040c60f3a8f1a8b48004ec66074eebdcc8..7a0398bb84f77e520aeb8113c2ac77ef7ef6e0f2 100644 (file)
@@ -255,9 +255,11 @@ void vp_del_vqs(struct virtio_device *vdev)
        for (i = 0; i < vp_dev->msix_used_vectors; ++i)
                free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
 
-       for (i = 0; i < vp_dev->msix_vectors; i++)
-               if (vp_dev->msix_affinity_masks[i])
-                       free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+       if (vp_dev->msix_affinity_masks) {
+               for (i = 0; i < vp_dev->msix_vectors; i++)
+                       if (vp_dev->msix_affinity_masks[i])
+                               free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+       }
 
        if (vp_dev->msix_enabled) {
                /* Disable the vector used for configuration */
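
The vp_del_vqs hunk above adds the classic teardown guard: check the array pointer itself before iterating its entries, so an error path reached before msix_affinity_masks was allocated can't dereference NULL. In miniature (userspace, illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct dev {
            unsigned int nvec;
            int **masks;            /* may still be NULL on early teardown */
    };

    static void del_vqs(struct dev *d)
    {
            unsigned int i;

            /* Check the array itself before walking its entries. */
            if (d->masks) {
                    for (i = 0; i < d->nvec; i++)
                            free(d->masks[i]);
                    free(d->masks);
            }
    }

    int main(void)
    {
            struct dev d = { .nvec = 4, .masks = NULL };

            del_vqs(&d);            /* safe even though nothing was allocated */
            printf("ok\n");
            return 0;
    }
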
index 18846afb39da189e3f0dd0168727e0cf2865549e..5df92c308286dc0f5afe203cf279adb3d8af8185 100644 (file)
@@ -882,6 +882,8 @@ static struct virtqueue *vring_create_virtqueue_split(
                                          GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
                if (queue)
                        break;
+               if (!may_reduce_num)
+                       return NULL;
        }
 
        if (!num)
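
The virtio_ring fix above stops the shrink-and-retry allocation loop as soon as a size is refused when the caller did not allow reducing the queue size; previously the loop could fall through and hand back a smaller ring than requested. A userspace sketch of the halve-until-it-fits loop with that early exit (names illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    /* Try progressively smaller ring sizes, but bail out immediately if
     * the caller required the exact size (may_reduce == 0). */
    static void *alloc_ring(size_t *num, int may_reduce)
    {
            for (; *num; *num /= 2) {
                    void *queue = malloc(*num * 64);  /* say, 64-byte entries */
                    if (queue)
                            return queue;
                    if (!may_reduce)
                            return NULL;
            }
            return NULL;
    }

    int main(void)
    {
            size_t num = 256;
            void *queue = alloc_ring(&num, 1);

            printf("got %zu entries: %s\n", num, queue ? "ok" : "failed");
            free(queue);
            return 0;
    }
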
index 0f4ecfcdb5497afa21656fa54db814108c2694e7..a9fb775852723ac7740437a220180a9ebab81c0b 100644 (file)
@@ -1016,15 +1016,15 @@ static int ds_probe(struct usb_interface *intf,
        /* alternative 3, 1ms interrupt (greatly speeds search), 64 byte bulk */
        alt = 3;
        err = usb_set_interface(dev->udev,
-               intf->altsetting[alt].desc.bInterfaceNumber, alt);
+               intf->cur_altsetting->desc.bInterfaceNumber, alt);
        if (err) {
                dev_err(&dev->udev->dev, "Failed to set alternative setting %d "
                        "for %d interface: err=%d.\n", alt,
-                       intf->altsetting[alt].desc.bInterfaceNumber, err);
+                       intf->cur_altsetting->desc.bInterfaceNumber, err);
                goto err_out_clear;
        }
 
-       iface_desc = &intf->altsetting[alt];
+       iface_desc = intf->cur_altsetting;
        if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
                pr_info("Num endpoints=%d. It is not DS9490R.\n",
                        iface_desc->desc.bNumEndpoints);
index de01a6d0059dc4adcb98a24197750f72b0b4ceaf..a1c61e351d3f7ee5cb8e82ba4a391559e6a8aef2 100644 (file)
@@ -140,8 +140,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
-                          GFP_KERNEL);
+       vma_priv = kzalloc(struct_size(vma_priv, pages, count), GFP_KERNEL);
        if (!vma_priv)
                return -ENOMEM;
 
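
struct_size(), used in the privcmd hunk above, computes sizeof(*p) plus count trailing elements with overflow checking, saturating rather than wrapping. A hedged userspace equivalent built on the GCC/Clang overflow builtins (the kernel helper itself lives in <linux/overflow.h>):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct vma_priv {
            size_t count;
            void *pages[];          /* flexible array member */
    };

    /* Overflow-checked sizeof(*p) + n * sizeof(p->pages[0]), saturating
     * to SIZE_MAX on overflow the way struct_size() does. */
    static size_t checked_struct_size(size_t n)
    {
            size_t bytes;

            if (__builtin_mul_overflow(n, sizeof(void *), &bytes) ||
                __builtin_add_overflow(bytes, sizeof(struct vma_priv), &bytes))
                    return SIZE_MAX;
            return bytes;
    }

    int main(void)
    {
            size_t count = 8;
            struct vma_priv *p = calloc(1, checked_struct_size(count));

            if (!p)
                    return 1;
            p->count = count;
            printf("allocated %zu bytes\n", checked_struct_size(count));
            free(p);
            return 0;
    }
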
index c3e201025ef015b49703cf311e71f1d1f041ec6e..0782ff3c227352e7b92b595960cc62aee5c2c4ce 100644 (file)
@@ -622,9 +622,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
        if (xen_store_evtchn == 0)
                return -ENOENT;
 
-       nonseekable_open(inode, filp);
-
-       filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
+       stream_open(inode, filp);
 
        u = kzalloc(sizeof(*u), GFP_KERNEL);
        if (u == NULL)
index 1c7955f5cdaf2e776026390f615806f3e6ce535c..128f2dbe256a4eb0f6124294f883b29d8a57e10e 100644 (file)
@@ -203,8 +203,7 @@ void afs_put_cb_interest(struct afs_net *net, struct afs_cb_interest *cbi)
  */
 void afs_init_callback_state(struct afs_server *server)
 {
-       if (!test_and_clear_bit(AFS_SERVER_FL_NEW, &server->flags))
-               server->cb_s_break++;
+       server->cb_s_break++;
 }
 
 /*
index 8ee5972893ed5a75583bfb2821a42636403ee086..2f8acb4c556d28c77ec6d8c130eaaf3916a38403 100644 (file)
@@ -34,7 +34,7 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *);
 static int afs_deliver_yfs_cb_callback(struct afs_call *);
 
 #define CM_NAME(name) \
-       const char afs_SRXCB##name##_name[] __tracepoint_string =       \
+       char afs_SRXCB##name##_name[] __tracepoint_string =     \
                "CB." #name
 
 /*
index ca08c83168f5fbf1f7f6b52c8c3ff769bf70cf04..0b37867b5c202332b66ba5bede2a31e4287a23e0 100644 (file)
@@ -1515,8 +1515,8 @@ static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr)
 
        xdr_encode_AFS_StoreStatus(&bp, attr);
 
-       *bp++ = 0;                              /* position of start of write */
-       *bp++ = 0;
+       *bp++ = htonl(attr->ia_size >> 32);     /* position of start of write */
+       *bp++ = htonl((u32) attr->ia_size);
        *bp++ = 0;                              /* size of write */
        *bp++ = 0;
        *bp++ = htonl(attr->ia_size >> 32);     /* new file length */
@@ -1564,7 +1564,7 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
 
        xdr_encode_AFS_StoreStatus(&bp, attr);
 
-       *bp++ = 0;                              /* position of start of write */
+       *bp++ = htonl(attr->ia_size);           /* position of start of write */
        *bp++ = 0;                              /* size of write */
        *bp++ = htonl(attr->ia_size);           /* new file length */
 
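
The setattr fixes above stop encoding the write position as zero and instead encode the new file size, which for the 64-bit variant is split into two network-order words, high half first, matching the htonl(attr->ia_size >> 32) / htonl((u32) attr->ia_size) pair in the hunk. A small sketch of that split:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Encode a 64-bit position as two big-endian 32-bit words, high half
     * first, mirroring the htonl() pair in the hunk above. */
    static void encode_u64(uint32_t *bp, uint64_t pos)
    {
            bp[0] = htonl((uint32_t)(pos >> 32));
            bp[1] = htonl((uint32_t)pos);
    }

    int main(void)
    {
            uint32_t words[2];

            encode_u64(words, 0x123456789abcdef0ULL);
            printf("%08x %08x\n", (unsigned)ntohl(words[0]),
                   (unsigned)ntohl(words[1]));
            return 0;
    }
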
index 1a4ce07fb406da8e3a4e0d12c6fda605636ccb47..9cedc3fc1b7744679010f4aae412c92925cd3b3a 100644 (file)
@@ -216,9 +216,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
        set_nlink(inode, 2);
        inode->i_uid            = GLOBAL_ROOT_UID;
        inode->i_gid            = GLOBAL_ROOT_GID;
-       inode->i_ctime.tv_sec   = get_seconds();
-       inode->i_ctime.tv_nsec  = 0;
-       inode->i_atime          = inode->i_mtime = inode->i_ctime;
+       inode->i_ctime = inode->i_atime = inode->i_mtime = current_time(inode);
        inode->i_blocks         = 0;
        inode_set_iversion_raw(inode, 0);
        inode->i_generation     = 0;
index bb1f244b2b3ac2ff4a8428a8f72132a6ed230e01..3904ab0b95632af35c4db8fcad36ee6e1f277b47 100644 (file)
@@ -474,7 +474,6 @@ struct afs_server {
        time64_t                put_time;       /* Time at which last put */
        time64_t                update_at;      /* Time at which to next update the record */
        unsigned long           flags;
-#define AFS_SERVER_FL_NEW      0               /* New server, don't inc cb_s_break */
 #define AFS_SERVER_FL_NOT_READY        1               /* The record is not ready for use */
 #define AFS_SERVER_FL_NOT_FOUND        2               /* VL server says no such server */
 #define AFS_SERVER_FL_VL_FAIL  3               /* Failed to access VL server */
@@ -827,7 +826,7 @@ static inline struct afs_cb_interest *afs_get_cb_interest(struct afs_cb_interest
 
 static inline unsigned int afs_calc_vnode_cb_break(struct afs_vnode *vnode)
 {
-       return vnode->cb_break + vnode->cb_s_break + vnode->cb_v_break;
+       return vnode->cb_break + vnode->cb_v_break;
 }
 
 static inline bool afs_cb_is_broken(unsigned int cb_break,
@@ -835,7 +834,6 @@ static inline bool afs_cb_is_broken(unsigned int cb_break,
                                    const struct afs_cb_interest *cbi)
 {
        return !cbi || cb_break != (vnode->cb_break +
-                                   cbi->server->cb_s_break +
                                    vnode->volume->cb_v_break);
 }
 
index 2c588f9bbbda226ec64fa0670e9c92c700f259e6..15c7e82d80cb30c0358db68f416b8d88b06dbc7c 100644 (file)
@@ -572,13 +572,17 @@ static void afs_deliver_to_call(struct afs_call *call)
                case -ENODATA:
                case -EBADMSG:
                case -EMSGSIZE:
-               default:
                        abort_code = RXGEN_CC_UNMARSHAL;
                        if (state != AFS_CALL_CL_AWAIT_REPLY)
                                abort_code = RXGEN_SS_UNMARSHAL;
                        rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
                                                abort_code, ret, "KUM");
                        goto local_abort;
+               default:
+                       abort_code = RX_USER_ABORT;
+                       rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+                                               abort_code, ret, "KER");
+                       goto local_abort;
                }
        }
 
@@ -610,6 +614,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
        bool stalled = false;
        u64 rtt;
        u32 life, last_life;
+       bool rxrpc_complete = false;
 
        DECLARE_WAITQUEUE(myself, current);
 
@@ -621,7 +626,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
                rtt2 = 2;
 
        timeout = rtt2;
-       last_life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
+       rxrpc_kernel_check_life(call->net->socket, call->rxcall, &last_life);
 
        add_wait_queue(&call->waitq, &myself);
        for (;;) {
@@ -639,7 +644,12 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
                if (afs_check_call_state(call, AFS_CALL_COMPLETE))
                        break;
 
-               life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
+               if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall, &life)) {
+                       /* rxrpc terminated the call. */
+                       rxrpc_complete = true;
+                       break;
+               }
+
                if (timeout == 0 &&
                    life == last_life && signal_pending(current)) {
                        if (stalled)
@@ -663,12 +673,16 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
        remove_wait_queue(&call->waitq, &myself);
        __set_current_state(TASK_RUNNING);
 
-       /* Kill off the call if it's still live. */
        if (!afs_check_call_state(call, AFS_CALL_COMPLETE)) {
-               _debug("call interrupted");
-               if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
-                                           RX_USER_ABORT, -EINTR, "KWI"))
-                       afs_set_call_complete(call, -EINTR, 0);
+               if (rxrpc_complete) {
+                       afs_set_call_complete(call, call->error, call->abort_code);
+               } else {
+                       /* Kill off the call if it's still live. */
+                       _debug("call interrupted");
+                       if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+                                                   RX_USER_ABORT, -EINTR, "KWI"))
+                               afs_set_call_complete(call, -EINTR, 0);
+               }
        }
 
        spin_lock_bh(&call->state_lock);
index 642afa2e9783c4f95284980dd8054610fa4d49cf..65b33b6da48b9411c8385a27869785d5076713b1 100644 (file)
@@ -226,7 +226,6 @@ static struct afs_server *afs_alloc_server(struct afs_net *net,
        RCU_INIT_POINTER(server->addresses, alist);
        server->addr_version = alist->version;
        server->uuid = *uuid;
-       server->flags = (1UL << AFS_SERVER_FL_NEW);
        server->update_at = ktime_get_real_seconds() + afs_server_update_delay;
        rwlock_init(&server->fs_lock);
        INIT_HLIST_HEAD(&server->cb_volumes);
index 72efcfcf9f95efd2b5cae1257a8d01247367ebeb..0122d7445fba1e07eaf62b4be1d1e69c66e5f7c4 100644 (file)
@@ -264,6 +264,7 @@ static void afs_kill_pages(struct address_space *mapping,
                                first = page->index + 1;
                        lock_page(page);
                        generic_error_remove_page(mapping, page);
+                       unlock_page(page);
                }
 
                __pagevec_release(&pv);
index 5aa57929e8c23559c41b8a875f3ea2db43a364dc..6e97a42d24d130471a97a28510ec3712605c50cd 100644 (file)
@@ -1514,7 +1514,7 @@ static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
        bp = xdr_encode_u32(bp, 0); /* RPC flags */
        bp = xdr_encode_YFSFid(bp, &vnode->fid);
        bp = xdr_encode_YFS_StoreStatus(bp, attr);
-       bp = xdr_encode_u64(bp, 0);             /* position of start of write */
+       bp = xdr_encode_u64(bp, attr->ia_size); /* position of start of write */
        bp = xdr_encode_u64(bp, 0);             /* size of write */
        bp = xdr_encode_u64(bp, attr->ia_size); /* new file length */
        yfs_check_req(call, bp);
index 38b741aef0bf5a93513f1ad1f8ab61b9de7c8078..3490d1fa0e16f4f1f189727661e18696ab3a7a08 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -181,7 +181,7 @@ struct poll_iocb {
        struct file             *file;
        struct wait_queue_head  *head;
        __poll_t                events;
-       bool                    woken;
+       bool                    done;
        bool                    cancelled;
        struct wait_queue_entry wait;
        struct work_struct      work;
@@ -204,8 +204,7 @@ struct aio_kiocb {
        struct kioctx           *ki_ctx;
        kiocb_cancel_fn         *ki_cancel;
 
-       struct iocb __user      *ki_user_iocb;  /* user's aiocb */
-       __u64                   ki_user_data;   /* user's data for completion */
+       struct io_event         ki_res;
 
        struct list_head        ki_list;        /* the aio core uses this
                                                 * for cancellation */
@@ -1022,6 +1021,9 @@ static bool get_reqs_available(struct kioctx *ctx)
 /* aio_get_req
  *     Allocate a slot for an aio request.
  * Returns NULL if no requests are free.
+ *
+ * The refcount is initialized to 2: one for the async op completion,
+ * one for the synchronous code that submits the request.
  */
 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
 {
@@ -1031,10 +1033,15 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
        if (unlikely(!req))
                return NULL;
 
+       if (unlikely(!get_reqs_available(ctx))) {
+               kmem_cache_free(kiocb_cachep, req);
+               return NULL;
+       }
+
        percpu_ref_get(&ctx->reqs);
        req->ki_ctx = ctx;
        INIT_LIST_HEAD(&req->ki_list);
-       refcount_set(&req->ki_refcnt, 0);
+       refcount_set(&req->ki_refcnt, 2);
        req->ki_eventfd = NULL;
        return req;
 }
@@ -1067,30 +1074,20 @@ out:
        return ret;
 }
 
-static inline void iocb_put(struct aio_kiocb *iocb)
-{
-       if (refcount_read(&iocb->ki_refcnt) == 0 ||
-           refcount_dec_and_test(&iocb->ki_refcnt)) {
-               if (iocb->ki_filp)
-                       fput(iocb->ki_filp);
-               percpu_ref_put(&iocb->ki_ctx->reqs);
-               kmem_cache_free(kiocb_cachep, iocb);
-       }
-}
-
-static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb,
-                          long res, long res2)
+static inline void iocb_destroy(struct aio_kiocb *iocb)
 {
-       ev->obj = (u64)(unsigned long)iocb->ki_user_iocb;
-       ev->data = iocb->ki_user_data;
-       ev->res = res;
-       ev->res2 = res2;
+       if (iocb->ki_eventfd)
+               eventfd_ctx_put(iocb->ki_eventfd);
+       if (iocb->ki_filp)
+               fput(iocb->ki_filp);
+       percpu_ref_put(&iocb->ki_ctx->reqs);
+       kmem_cache_free(kiocb_cachep, iocb);
 }
 
 /* aio_complete
  *     Called when the io request on the given iocb is complete.
  */
-static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
+static void aio_complete(struct aio_kiocb *iocb)
 {
        struct kioctx   *ctx = iocb->ki_ctx;
        struct aio_ring *ring;
@@ -1114,14 +1111,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
        ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
        event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 
-       aio_fill_event(event, iocb, res, res2);
+       *event = iocb->ki_res;
 
        kunmap_atomic(ev_page);
        flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 
-       pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
-                ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
-                res, res2);
+       pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
+                (void __user *)(unsigned long)iocb->ki_res.obj,
+                iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
 
        /* after flagging the request as done, we
         * must never even look at it again
@@ -1148,10 +1145,8 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
         * eventfd. The eventfd_signal() function is safe to be called
         * from IRQ context.
         */
-       if (iocb->ki_eventfd) {
+       if (iocb->ki_eventfd)
                eventfd_signal(iocb->ki_eventfd, 1);
-               eventfd_ctx_put(iocb->ki_eventfd);
-       }
 
        /*
         * We have to order our ring_info tail store above and test
@@ -1163,7 +1158,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 
        if (waitqueue_active(&ctx->wait))
                wake_up(&ctx->wait);
-       iocb_put(iocb);
+}
+
+static inline void iocb_put(struct aio_kiocb *iocb)
+{
+       if (refcount_dec_and_test(&iocb->ki_refcnt)) {
+               aio_complete(iocb);
+               iocb_destroy(iocb);
+       }
 }
 
 /* aio_read_events_ring
@@ -1437,7 +1439,9 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
                file_end_write(kiocb->ki_filp);
        }
 
-       aio_complete(iocb, res, res2);
+       iocb->ki_res.res = res;
+       iocb->ki_res.res2 = res2;
+       iocb_put(iocb);
 }
 
 static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
@@ -1514,13 +1518,13 @@ static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
        }
 }
 
-static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
+static int aio_read(struct kiocb *req, const struct iocb *iocb,
                        bool vectored, bool compat)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
        struct iov_iter iter;
        struct file *file;
-       ssize_t ret;
+       int ret;
 
        ret = aio_prep_rw(req, iocb);
        if (ret)
@@ -1542,13 +1546,13 @@ static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
        return ret;
 }
 
-static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
+static int aio_write(struct kiocb *req, const struct iocb *iocb,
                         bool vectored, bool compat)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
        struct iov_iter iter;
        struct file *file;
-       ssize_t ret;
+       int ret;
 
        ret = aio_prep_rw(req, iocb);
        if (ret)
@@ -1585,11 +1589,10 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
 
 static void aio_fsync_work(struct work_struct *work)
 {
-       struct fsync_iocb *req = container_of(work, struct fsync_iocb, work);
-       int ret;
+       struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
 
-       ret = vfs_fsync(req->file, req->datasync);
-       aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
+       iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
+       iocb_put(iocb);
 }
 
 static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
@@ -1608,11 +1611,6 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
        return 0;
 }
 
-static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
-{
-       aio_complete(iocb, mangle_poll(mask), 0);
-}
-
 static void aio_poll_complete_work(struct work_struct *work)
 {
        struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1638,9 +1636,11 @@ static void aio_poll_complete_work(struct work_struct *work)
                return;
        }
        list_del_init(&iocb->ki_list);
+       iocb->ki_res.res = mangle_poll(mask);
+       req->done = true;
        spin_unlock_irq(&ctx->ctx_lock);
 
-       aio_poll_complete(iocb, mask);
+       iocb_put(iocb);
 }
 
 /* assumes we are called with irqs disabled */
@@ -1668,31 +1668,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
        __poll_t mask = key_to_poll(key);
        unsigned long flags;
 
-       req->woken = true;
-
        /* for instances that support it check for an event match first: */
-       if (mask) {
-               if (!(mask & req->events))
-                       return 0;
+       if (mask && !(mask & req->events))
+               return 0;
+
+       list_del_init(&req->wait.entry);
 
+       if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
                /*
                 * Try to complete the iocb inline if we can. Use
                 * irqsave/irqrestore because not all filesystems (e.g. fuse)
                 * call this function with IRQs disabled and because IRQs
                 * have to be disabled before ctx_lock is obtained.
                 */
-               if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
-                       list_del(&iocb->ki_list);
-                       spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
-
-                       list_del_init(&req->wait.entry);
-                       aio_poll_complete(iocb, mask);
-                       return 1;
-               }
+               list_del(&iocb->ki_list);
+               iocb->ki_res.res = mangle_poll(mask);
+               req->done = true;
+               spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
+               iocb_put(iocb);
+       } else {
+               schedule_work(&req->work);
        }
-
-       list_del_init(&req->wait.entry);
-       schedule_work(&req->work);
        return 1;
 }
 
@@ -1719,11 +1715,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
        add_wait_queue(head, &pt->iocb->poll.wait);
 }
 
-static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
+static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 {
        struct kioctx *ctx = aiocb->ki_ctx;
        struct poll_iocb *req = &aiocb->poll;
        struct aio_poll_table apt;
+       bool cancel = false;
        __poll_t mask;
 
        /* reject any unknown events outside the normal event mask. */
@@ -1737,7 +1734,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
        req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
 
        req->head = NULL;
-       req->woken = false;
+       req->done = false;
        req->cancelled = false;
 
        apt.pt._qproc = aio_poll_queue_proc;
@@ -1749,156 +1746,135 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
        INIT_LIST_HEAD(&req->wait.entry);
        init_waitqueue_func_entry(&req->wait, aio_poll_wake);
 
-       /* one for removal from waitqueue, one for this function */
-       refcount_set(&aiocb->ki_refcnt, 2);
-
        mask = vfs_poll(req->file, &apt.pt) & req->events;
-       if (unlikely(!req->head)) {
-               /* we did not manage to set up a waitqueue, done */
-               goto out;
-       }
-
        spin_lock_irq(&ctx->ctx_lock);
-       spin_lock(&req->head->lock);
-       if (req->woken) {
-               /* wake_up context handles the rest */
-               mask = 0;
+       if (likely(req->head)) {
+               spin_lock(&req->head->lock);
+               if (unlikely(list_empty(&req->wait.entry))) {
+                       if (apt.error)
+                               cancel = true;
+                       apt.error = 0;
+                       mask = 0;
+               }
+               if (mask || apt.error) {
+                       list_del_init(&req->wait.entry);
+               } else if (cancel) {
+                       WRITE_ONCE(req->cancelled, true);
+               } else if (!req->done) { /* actually waiting for an event */
+                       list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
+                       aiocb->ki_cancel = aio_poll_cancel;
+               }
+               spin_unlock(&req->head->lock);
+       }
+       if (mask) { /* no async, we'd stolen it */
+               aiocb->ki_res.res = mangle_poll(mask);
                apt.error = 0;
-       } else if (mask || apt.error) {
-               /* if we get an error or a mask we are done */
-               WARN_ON_ONCE(list_empty(&req->wait.entry));
-               list_del_init(&req->wait.entry);
-       } else {
-               /* actually waiting for an event */
-               list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
-               aiocb->ki_cancel = aio_poll_cancel;
        }
-       spin_unlock(&req->head->lock);
        spin_unlock_irq(&ctx->ctx_lock);
-
-out:
-       if (unlikely(apt.error))
-               return apt.error;
-
        if (mask)
-               aio_poll_complete(aiocb, mask);
-       iocb_put(aiocb);
-       return 0;
+               iocb_put(aiocb);
+       return apt.error;
 }
 
 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
-                          struct iocb __user *user_iocb, bool compat)
+                          struct iocb __user *user_iocb, struct aio_kiocb *req,
+                          bool compat)
 {
-       struct aio_kiocb *req;
-       ssize_t ret;
-
-       /* enforce forwards compatibility on users */
-       if (unlikely(iocb->aio_reserved2)) {
-               pr_debug("EINVAL: reserve field set\n");
-               return -EINVAL;
-       }
-
-       /* prevent overflows */
-       if (unlikely(
-           (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
-           (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
-           ((ssize_t)iocb->aio_nbytes < 0)
-          )) {
-               pr_debug("EINVAL: overflow check\n");
-               return -EINVAL;
-       }
-
-       if (!get_reqs_available(ctx))
-               return -EAGAIN;
-
-       ret = -EAGAIN;
-       req = aio_get_req(ctx);
-       if (unlikely(!req))
-               goto out_put_reqs_available;
-
        req->ki_filp = fget(iocb->aio_fildes);
-       ret = -EBADF;
        if (unlikely(!req->ki_filp))
-               goto out_put_req;
+               return -EBADF;
 
        if (iocb->aio_flags & IOCB_FLAG_RESFD) {
+               struct eventfd_ctx *eventfd;
                /*
                 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
                 * instance of the file* now. The file descriptor must be
                 * an eventfd() fd, and will be signaled for each completed
                 * event using the eventfd_signal() function.
                 */
-               req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
-               if (IS_ERR(req->ki_eventfd)) {
-                       ret = PTR_ERR(req->ki_eventfd);
-                       req->ki_eventfd = NULL;
-                       goto out_put_req;
-               }
+               eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
+               if (IS_ERR(eventfd))
+                       return PTR_ERR(eventfd);
+
+               req->ki_eventfd = eventfd;
        }
 
-       ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
-       if (unlikely(ret)) {
+       if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
                pr_debug("EFAULT: aio_key\n");
-               goto out_put_req;
+               return -EFAULT;
        }
 
-       req->ki_user_iocb = user_iocb;
-       req->ki_user_data = iocb->aio_data;
+       req->ki_res.obj = (u64)(unsigned long)user_iocb;
+       req->ki_res.data = iocb->aio_data;
+       req->ki_res.res = 0;
+       req->ki_res.res2 = 0;
 
        switch (iocb->aio_lio_opcode) {
        case IOCB_CMD_PREAD:
-               ret = aio_read(&req->rw, iocb, false, compat);
-               break;
+               return aio_read(&req->rw, iocb, false, compat);
        case IOCB_CMD_PWRITE:
-               ret = aio_write(&req->rw, iocb, false, compat);
-               break;
+               return aio_write(&req->rw, iocb, false, compat);
        case IOCB_CMD_PREADV:
-               ret = aio_read(&req->rw, iocb, true, compat);
-               break;
+               return aio_read(&req->rw, iocb, true, compat);
        case IOCB_CMD_PWRITEV:
-               ret = aio_write(&req->rw, iocb, true, compat);
-               break;
+               return aio_write(&req->rw, iocb, true, compat);
        case IOCB_CMD_FSYNC:
-               ret = aio_fsync(&req->fsync, iocb, false);
-               break;
+               return aio_fsync(&req->fsync, iocb, false);
        case IOCB_CMD_FDSYNC:
-               ret = aio_fsync(&req->fsync, iocb, true);
-               break;
+               return aio_fsync(&req->fsync, iocb, true);
        case IOCB_CMD_POLL:
-               ret = aio_poll(req, iocb);
-               break;
+               return aio_poll(req, iocb);
        default:
                pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
-               ret = -EINVAL;
-               break;
+               return -EINVAL;
        }
-
-       /*
-        * If ret is 0, we'd either done aio_complete() ourselves or have
-        * arranged for that to be done asynchronously.  Anything non-zero
-        * means that we need to destroy req ourselves.
-        */
-       if (ret)
-               goto out_put_req;
-       return 0;
-out_put_req:
-       if (req->ki_eventfd)
-               eventfd_ctx_put(req->ki_eventfd);
-       iocb_put(req);
-out_put_reqs_available:
-       put_reqs_available(ctx, 1);
-       return ret;
 }
 
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                         bool compat)
 {
+       struct aio_kiocb *req;
        struct iocb iocb;
+       int err;
 
        if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
                return -EFAULT;
 
-       return __io_submit_one(ctx, &iocb, user_iocb, compat);
+       /* enforce forwards compatibility on users */
+       if (unlikely(iocb.aio_reserved2)) {
+               pr_debug("EINVAL: reserve field set\n");
+               return -EINVAL;
+       }
+
+       /* prevent overflows */
+       if (unlikely(
+           (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
+           (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
+           ((ssize_t)iocb.aio_nbytes < 0)
+          )) {
+               pr_debug("EINVAL: overflow check\n");
+               return -EINVAL;
+       }
+
+       req = aio_get_req(ctx);
+       if (unlikely(!req))
+               return -EAGAIN;
+
+       err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
+
+       /* Done with the synchronous reference */
+       iocb_put(req);
+
+       /*
+        * If err is 0, we've either done aio_complete() ourselves or have
+        * arranged for it to be done asynchronously.  Anything non-zero
+        * means that we need to destroy req ourselves.
+        */
+       if (unlikely(err)) {
+               iocb_destroy(req);
+               put_reqs_available(ctx, 1);
+       }
+       return err;
 }
 
 /* sys_io_submit:
@@ -1997,24 +1973,6 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
 }
 #endif
 
-/* lookup_kiocb
- *     Finds a given iocb for cancellation.
- */
-static struct aio_kiocb *
-lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb)
-{
-       struct aio_kiocb *kiocb;
-
-       assert_spin_locked(&ctx->ctx_lock);
-
-       /* TODO: use a hash or array, this sucks. */
-       list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
-               if (kiocb->ki_user_iocb == iocb)
-                       return kiocb;
-       }
-       return NULL;
-}
-
 /* sys_io_cancel:
  *     Attempts to cancel an iocb previously passed to io_submit.  If
  *     the operation is successfully cancelled, the resulting event is
@@ -2032,6 +1990,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
        struct aio_kiocb *kiocb;
        int ret = -EINVAL;
        u32 key;
+       u64 obj = (u64)(unsigned long)iocb;
 
        if (unlikely(get_user(key, &iocb->aio_key)))
                return -EFAULT;
@@ -2043,10 +2002,13 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
                return -EINVAL;
 
        spin_lock_irq(&ctx->ctx_lock);
-       kiocb = lookup_kiocb(ctx, iocb);
-       if (kiocb) {
-               ret = kiocb->ki_cancel(&kiocb->rw);
-               list_del_init(&kiocb->ki_list);
+       /* TODO: use a hash or array, this sucks. */
+       list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
+               if (kiocb->ki_res.obj == obj) {
+                       ret = kiocb->ki_cancel(&kiocb->rw);
+                       list_del_init(&kiocb->ki_list);
+                       break;
+               }
        }
        spin_unlock_irq(&ctx->ctx_lock);
 
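A minimal userspace sketch of the cancel-side pattern above, where each request records the submitter's iocb pointer as a 64-bit cookie (ki_res.obj) and io_cancel() scans the active list for a matching cookie; the list layout and names here are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

struct req {
        uint64_t obj;           /* submitter's iocb pointer, widened to u64 */
        struct req *next;       /* stand-in for the ki_list linkage */
};

/* Scan the active list for the request whose cookie matches. */
static struct req *cancel_lookup(struct req *active, const void *user_iocb)
{
        uint64_t obj = (uint64_t)(uintptr_t)user_iocb;
        struct req *r;

        for (r = active; r; r = r->next)
                if (r->obj == obj)
                        return r;       /* caller unlinks and cancels it */
        return NULL;
}

int main(void)
{
        int dummy;              /* pretend this is a user-space iocb */
        struct req r = { (uint64_t)(uintptr_t)&dummy, NULL };

        printf("found: %d\n", cancel_lookup(&r, &dummy) == &r);
        return 0;
}
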
index e9faa52bb489c424775af69ccf9362c1c4b07c2f..bb28e2ead679c10a21a3f12dff7bddefce990ee5 100644 (file)
@@ -264,7 +264,8 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
        bio_for_each_segment_all(bvec, &bio, i, iter_all) {
                if (should_dirty && !PageCompound(bvec->bv_page))
                        set_page_dirty_lock(bvec->bv_page);
-               put_page(bvec->bv_page);
+               if (!bio_flagged(&bio, BIO_NO_PAGE_REF))
+                       put_page(bvec->bv_page);
        }
 
        if (unlikely(bio.bi_status))
@@ -307,10 +308,10 @@ static void blkdev_bio_end_io(struct bio *bio)
        struct blkdev_dio *dio = bio->bi_private;
        bool should_dirty = dio->should_dirty;
 
-       if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) {
-               if (bio->bi_status && !dio->bio.bi_status)
-                       dio->bio.bi_status = bio->bi_status;
-       } else {
+       if (bio->bi_status && !dio->bio.bi_status)
+               dio->bio.bi_status = bio->bi_status;
+
+       if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
                if (!dio->is_sync) {
                        struct kiocb *iocb = dio->iocb;
                        ssize_t ret;
@@ -336,12 +337,14 @@ static void blkdev_bio_end_io(struct bio *bio)
        if (should_dirty) {
                bio_check_pages_dirty(bio);
        } else {
-               struct bio_vec *bvec;
-               int i;
-               struct bvec_iter_all iter_all;
+               if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
+                       struct bvec_iter_all iter_all;
+                       struct bio_vec *bvec;
+                       int i;
 
-               bio_for_each_segment_all(bvec, bio, i, iter_all)
-                       put_page(bvec->bv_page);
+                       bio_for_each_segment_all(bvec, bio, i, iter_all)
+                               put_page(bvec->bv_page);
+               }
                bio_put(bio);
        }
 }
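
With the error handling hoisted above the reference drop, every fragment records the first failure while finalization still runs exactly once. A standalone C11 sketch of that completion shape (single-threaded and with made-up names; the kernel version runs from bio completion context):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct dio {
        bool multi;             /* more than one fragment in flight? */
        atomic_int ref;         /* outstanding fragment count */
        int status;             /* 0 = success, first error wins */
};

static void end_fragment(struct dio *d, int status)
{
        if (status && !d->status)       /* record the first failure */
                d->status = status;

        /* finalize once: single fragment, or this dropped the last ref */
        if (!d->multi || atomic_fetch_sub(&d->ref, 1) == 1)
                printf("complete, status=%d\n", d->status);
}

int main(void)
{
        struct dio d = { .multi = true, .status = 0 };

        atomic_init(&d.ref, 2);
        end_fragment(&d, 0);    /* first completion: nothing finalized yet */
        end_fragment(&d, -5);   /* last completion: finalizes with the error */
        return 0;
}
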
index 1d49694e6ae3226044c8d9464cca09f78889d335..c5880329ae37c661b4e87b3cafc0599e776f242d 100644 (file)
@@ -6174,7 +6174,7 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
         *
         * This is overestimating in most cases.
         */
-       qgroup_rsv_size = outstanding_extents * fs_info->nodesize;
+       qgroup_rsv_size = (u64)outstanding_extents * fs_info->nodesize;
 
        spin_lock(&block_rsv->lock);
        block_rsv->size = reserve_size;
index 920bf3b4b0ef5e5296d3cec2c2e82a8ab78dc0ac..cccc75d15970cbc61e8e1bcd1735fb28dc549123 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/highmem.h>
+#include <linux/sched/mm.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -427,9 +428,13 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
        unsigned long this_sum_bytes = 0;
        int i;
        u64 offset;
+       unsigned nofs_flag;
+
+       nofs_flag = memalloc_nofs_save();
+       sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
+                      GFP_KERNEL);
+       memalloc_nofs_restore(nofs_flag);
 
-       sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
-                      GFP_NOFS);
        if (!sums)
                return BLK_STS_RESOURCE;
 
@@ -472,8 +477,10 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
 
                                bytes_left = bio->bi_iter.bi_size - total_bytes;
 
-                               sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left),
-                                              GFP_NOFS);
+                               nofs_flag = memalloc_nofs_save();
+                               sums = kvzalloc(btrfs_ordered_sum_size(fs_info,
+                                                     bytes_left), GFP_KERNEL);
+                               memalloc_nofs_restore(nofs_flag);
                                BUG_ON(!sums); /* -ENOMEM */
                                sums->len = bytes_left;
                                ordered = btrfs_lookup_ordered_extent(inode,
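
The kvzalloc() calls above pass GFP_KERNEL yet still avoid filesystem reclaim, because memalloc_nofs_save()/memalloc_nofs_restore() scope the restriction to the task instead of threading a GFP_NOFS flag through every callee. A rough userspace analogue of that scoping idea (the context word and toy allocator are invented for illustration):

#include <stdio.h>

static _Thread_local unsigned alloc_ctx;        /* per-task context word */
#define CTX_NOFS 0x1

static unsigned nofs_save(void)
{
        unsigned old = alloc_ctx;

        alloc_ctx |= CTX_NOFS;
        return old;
}

static void nofs_restore(unsigned old)
{
        alloc_ctx = old;
}

static void allocate(void)
{
        /* a real allocator would drop __GFP_FS when the context says NOFS */
        printf("allocating %s fs-reclaim\n",
               (alloc_ctx & CTX_NOFS) ? "without" : "with");
}

int main(void)
{
        unsigned flags = nofs_save();

        allocate();             /* behaves as if GFP_NOFS had been passed */
        nofs_restore(flags);
        allocate();             /* normal GFP_KERNEL behaviour again */
        return 0;
}
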
index 82fdda8ff5ab82b5298c4b72859e697d8bd1a3d5..2973608824ecacbfaad11a6f0d57460d113a5e47 100644 (file)
@@ -6783,7 +6783,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
        u64 extent_start = 0;
        u64 extent_end = 0;
        u64 objectid = btrfs_ino(inode);
-       u8 extent_type;
+       int extent_type = -1;
        struct btrfs_path *path = NULL;
        struct btrfs_root *root = inode->root;
        struct btrfs_file_extent_item *item;
index ec2d8919e7fb0ee63c28bcfd241d0e8a54ed05a3..cd4e693406a0e62bda2171d7d6800c8b4ef65ab0 100644 (file)
@@ -501,6 +501,16 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
+       /*
+        * If the fs is mounted with nologreplay, which requires it to be
+        * mounted in RO mode as well, we can not allow discard on free space
+        * inside block groups, because log trees refer to extents that are not
+        * pinned in a block group's free space cache (pinning the extents is
+        * precisely the first phase of replaying a log tree).
+        */
+       if (btrfs_test_opt(fs_info, NOLOGREPLAY))
+               return -EROFS;
+
        rcu_read_lock();
        list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
                                dev_list) {
index 6fde2b2741ef13b2bdabfac4ad29937d7e784afa..45e3cfd1198bc29265d28a360f3d261a70144953 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/writeback.h>
+#include <linux/sched/mm.h>
 #include "ctree.h"
 #include "transaction.h"
 #include "btrfs_inode.h"
@@ -442,7 +443,7 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
                        cur = entry->list.next;
                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
                        list_del(&sum->list);
-                       kfree(sum);
+                       kvfree(sum);
                }
                kmem_cache_free(btrfs_ordered_extent_cache, entry);
        }
index dc6140013ae8194739a8aa6a387f12c35794bdf9..61d22a56c0ba4e7d43f0552854f4ac4e82443218 100644 (file)
@@ -366,11 +366,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
 
 static int prop_compression_validate(const char *value, size_t len)
 {
-       if (!strncmp("lzo", value, len))
+       if (!strncmp("lzo", value, 3))
                return 0;
-       else if (!strncmp("zlib", value, len))
+       else if (!strncmp("zlib", value, 4))
                return 0;
-       else if (!strncmp("zstd", value, len))
+       else if (!strncmp("zstd", value, 4))
                return 0;
 
        return -EINVAL;
@@ -396,7 +396,7 @@ static int prop_compression_apply(struct inode *inode,
                btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
        } else if (!strncmp("zlib", value, 4)) {
                type = BTRFS_COMPRESS_ZLIB;
-       } else if (!strncmp("zstd", value, len)) {
+       } else if (!strncmp("zstd", value, 4)) {
                type = BTRFS_COMPRESS_ZSTD;
                btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
        } else {
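
The fix above is subtle enough to spell out: bounding strncmp() by the caller-supplied length turns an intended exact match into a prefix match, so a short value such as "zl" compared equal to "zlib". A standalone demonstration of the before/after behaviour:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *value = "zl";       /* user-controlled property value */
        size_t len = strlen(value);

        /* old check: only len bytes are compared, so "zl" matches "zlib" */
        printf("old: %s\n", !strncmp("zlib", value, len) ? "match" : "no match");

        /* fixed check: compare the full length of the literal */
        printf("new: %s\n", !strncmp("zlib", value, 4) ? "match" : "no match");
        return 0;
}
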
index eb680b715dd6b2f50e7d98026a8dcc256797eacc..e659d9d6110733845b35309703d9dff73cb78689 100644 (file)
@@ -1922,8 +1922,8 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
        int i;
 
        /* Level sanity check */
-       if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL ||
-           root_level < 0 || root_level >= BTRFS_MAX_LEVEL ||
+       if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
+           root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
            root_level < cur_level) {
                btrfs_err_rl(fs_info,
                        "%s: bad levels, cur_level=%d root_level=%d",
index 1869ba8e5981c948c435bf26e3c2f9d8016770b0..67a6f7d4740230aaa24b1ecad5e91ec5b94b5f70 100644 (file)
@@ -2430,8 +2430,9 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
                        bitmap_clear(rbio->dbitmap, pagenr, 1);
                kunmap(p);
 
-               for (stripe = 0; stripe < rbio->real_stripes; stripe++)
+               for (stripe = 0; stripe < nr_data; stripe++)
                        kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
+               kunmap(p_page);
        }
 
        __free_page(p_page);
index d09b6cdb785a0a979a91d471e34844549d102df5..b283d3a6e837dd0975d1cfbde7edad317edfb814 100644 (file)
@@ -205,28 +205,17 @@ static struct root_entry *lookup_root_entry(struct rb_root *root, u64 objectid)
 #ifdef CONFIG_STACKTRACE
 static void __save_stack_trace(struct ref_action *ra)
 {
-       struct stack_trace stack_trace;
-
-       stack_trace.max_entries = MAX_TRACE;
-       stack_trace.nr_entries = 0;
-       stack_trace.entries = ra->trace;
-       stack_trace.skip = 2;
-       save_stack_trace(&stack_trace);
-       ra->trace_len = stack_trace.nr_entries;
+       ra->trace_len = stack_trace_save(ra->trace, MAX_TRACE, 2);
 }
 
 static void __print_stack_trace(struct btrfs_fs_info *fs_info,
                                struct ref_action *ra)
 {
-       struct stack_trace trace;
-
        if (ra->trace_len == 0) {
                btrfs_err(fs_info, "  ref-verify: no stacktrace");
                return;
        }
-       trace.nr_entries = ra->trace_len;
-       trace.entries = ra->trace;
-       print_stack_trace(&trace, 2);
+       stack_trace_print(ra->trace, ra->trace_len, 2);
 }
 #else
 static void inline __save_stack_trace(struct ref_action *ra)
index acdad6d658f54bda7cf9c379867d212a41d1c24b..e4e665f422fc4c87b05181211b73ca2097e3b7c2 100644 (file)
@@ -1886,8 +1886,10 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
        }
 }
 
-static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
+static inline int btrfs_start_delalloc_flush(struct btrfs_trans_handle *trans)
 {
+       struct btrfs_fs_info *fs_info = trans->fs_info;
+
        /*
         * We use writeback_inodes_sb here because if we used
         * btrfs_start_delalloc_roots we would deadlock with fs freeze.
@@ -1897,15 +1899,50 @@ static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
         * from already being in a transaction and our join_transaction doesn't
         * have to re-take the fs freeze lock.
         */
-       if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
+       if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
                writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
+       } else {
+               struct btrfs_pending_snapshot *pending;
+               struct list_head *head = &trans->transaction->pending_snapshots;
+
+               /*
+                * Flush delalloc for any root that is going to be snapshotted.
+                * This is done to avoid a corrupted version of files, in the
+                * snapshots, that had both buffered and direct IO writes (even
+                * if they were done sequentially) due to an unordered update of
+                * the inode's size on disk.
+                */
+               list_for_each_entry(pending, head, list) {
+                       int ret;
+
+                       ret = btrfs_start_delalloc_snapshot(pending->root);
+                       if (ret)
+                               return ret;
+               }
+       }
        return 0;
 }
 
-static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
+static inline void btrfs_wait_delalloc_flush(struct btrfs_trans_handle *trans)
 {
-       if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
+       struct btrfs_fs_info *fs_info = trans->fs_info;
+
+       if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
                btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+       } else {
+               struct btrfs_pending_snapshot *pending;
+               struct list_head *head = &trans->transaction->pending_snapshots;
+
+               /*
+                * Wait for any delalloc that we started previously for the roots
+                * that are going to be snapshotted. This is to avoid a corrupted
+                * version of files in the snapshots that had both buffered and
+                * direct IO writes (even if they were done sequentially).
+                */
+               list_for_each_entry(pending, head, list)
+                       btrfs_wait_ordered_extents(pending->root,
+                                                  U64_MAX, 0, U64_MAX);
+       }
 }
 
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
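
The two helpers above deliberately split the work into a start phase and a wait phase: writeback is kicked off for every root that will be snapshotted before the commit waits on any of it. The same shape as a self-contained pthread sketch (the roots and the writeback body are stand-ins, not btrfs calls):

#include <pthread.h>
#include <stdio.h>

#define NROOTS 3

static void *writeback(void *arg)
{
        printf("flushing delalloc for root %ld\n", (long)arg);
        return NULL;
}

int main(void)
{
        pthread_t flush[NROOTS];
        long i;

        /* phase 1: start the flush for every pending snapshot root */
        for (i = 0; i < NROOTS; i++)
                pthread_create(&flush[i], NULL, writeback, (void *)i);

        /* phase 2: only then wait for all of them, before committing */
        for (i = 0; i < NROOTS; i++)
                pthread_join(flush[i], NULL);

        printf("all roots flushed, safe to commit\n");
        return 0;
}
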
@@ -2023,7 +2060,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
 
        extwriter_counter_dec(cur_trans, trans->type);
 
-       ret = btrfs_start_delalloc_flush(fs_info);
+       ret = btrfs_start_delalloc_flush(trans);
        if (ret)
                goto cleanup_transaction;
 
@@ -2039,7 +2076,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
        if (ret)
                goto cleanup_transaction;
 
-       btrfs_wait_delalloc_flush(fs_info);
+       btrfs_wait_delalloc_flush(trans);
 
        btrfs_scrub_pause(fs_info);
        /*
index f06454a55e00cb4df0f71f03eb0013adbae1e4f4..561884f60d35c36e11928e28e5007901fc695198 100644 (file)
@@ -3578,9 +3578,16 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
        }
        btrfs_release_path(path);
 
-       /* find the first key from this transaction again */
+       /*
+        * Find the first key from this transaction again.  See the note for
+        * log_new_dir_dentries: if we're logging a directory recursively, we
+        * won't be holding its i_mutex, which means the directory can be modified
+        * while we're logging it.  If we remove an entry between our first
+        * search and this search we'll not find the key again and can just
+        * bail.
+        */
        ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
-       if (WARN_ON(ret != 0))
+       if (ret != 0)
                goto done;
 
        /*
@@ -4544,6 +4551,19 @@ static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
                item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                      struct btrfs_inode_item);
                *size_ret = btrfs_inode_size(path->nodes[0], item);
+               /*
+                * If the in-memory inode's i_size is smaller than the inode
+                * size stored in the btree, return the inode's i_size, so
+                * that we get a correct inode size after replaying the log
+                * when before a power failure we had a shrinking truncate
+                * followed by addition of a new name (rename / new hard link).
+                * Otherwise return the inode size from the btree, to avoid
+                * data loss when replaying a log due to previously doing a
+                * write that expands the inode's size and logging a new name
+                * immediately after.
+                */
+               if (*size_ret > inode->vfs_inode.i_size)
+                       *size_ret = inode->vfs_inode.i_size;
        }
 
        btrfs_release_path(path);
@@ -4705,15 +4725,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
                                        struct btrfs_file_extent_item);
 
                if (btrfs_file_extent_type(leaf, extent) ==
-                   BTRFS_FILE_EXTENT_INLINE) {
-                       len = btrfs_file_extent_ram_bytes(leaf, extent);
-                       ASSERT(len == i_size ||
-                              (len == fs_info->sectorsize &&
-                               btrfs_file_extent_compression(leaf, extent) !=
-                               BTRFS_COMPRESS_NONE) ||
-                              (len < i_size && i_size < fs_info->sectorsize));
+                   BTRFS_FILE_EXTENT_INLINE)
                        return 0;
-               }
 
                len = btrfs_file_extent_num_bytes(leaf, extent);
                /* Last extent goes beyond i_size, no need to log a hole. */
index 9024eee889b9838caa2799ca51f439106a955ff0..db934ceae9c109f39eade85457a25b8ce99604ae 100644 (file)
@@ -6407,7 +6407,7 @@ static void btrfs_end_bio(struct bio *bio)
                                if (bio_op(bio) == REQ_OP_WRITE)
                                        btrfs_dev_stat_inc_and_print(dev,
                                                BTRFS_DEV_STAT_WRITE_ERRS);
-                               else
+                               else if (!(bio->bi_opf & REQ_RAHEAD))
                                        btrfs_dev_stat_inc_and_print(dev,
                                                BTRFS_DEV_STAT_READ_ERRS);
                                if (bio->bi_opf & REQ_PREFLUSH)
index a8f429882249476303868e4b68f272653ad72ebb..0637149fb9f9a7d26b383a2abedc08bb026d301e 100644 (file)
@@ -1766,6 +1766,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
 unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
 {
        struct ceph_inode_info *dci = ceph_inode(dir);
+       unsigned hash;
 
        switch (dci->i_dir_layout.dl_dir_hash) {
        case 0: /* for backward compat */
@@ -1773,8 +1774,11 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
                return dn->d_name.hash;
 
        default:
-               return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
+               spin_lock(&dn->d_lock);
+               hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
                                     dn->d_name.name, dn->d_name.len);
+               spin_unlock(&dn->d_lock);
+               return hash;
        }
 }
 
index e3346628efe2e221c844db3af6b4336d07b4f7f1..c2feb310ac1e0d7bd0c263f4d5944cc2c22852ac 100644 (file)
@@ -524,6 +524,7 @@ static void ceph_i_callback(struct rcu_head *head)
        struct inode *inode = container_of(head, struct inode, i_rcu);
        struct ceph_inode_info *ci = ceph_inode(inode);
 
+       kfree(ci->i_symlink);
        kmem_cache_free(ceph_inode_cachep, ci);
 }
 
@@ -566,7 +567,6 @@ void ceph_destroy_inode(struct inode *inode)
                }
        }
 
-       kfree(ci->i_symlink);
        while ((n = rb_first(&ci->i_fragtree)) != NULL) {
                frag = rb_entry(n, struct ceph_inode_frag, node);
                rb_erase(n, &ci->i_fragtree);
@@ -1163,6 +1163,19 @@ static int splice_dentry(struct dentry **pdn, struct inode *in)
        return 0;
 }
 
+static int d_name_cmp(struct dentry *dentry, const char *name, size_t len)
+{
+       int ret;
+
+       /* take d_lock to ensure dentry->d_name stability */
+       spin_lock(&dentry->d_lock);
+       ret = dentry->d_name.len - len;
+       if (!ret)
+               ret = memcmp(dentry->d_name.name, name, len);
+       spin_unlock(&dentry->d_lock);
+       return ret;
+}
+
 /*
  * Incorporate results into the local cache.  This is either just
  * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
@@ -1412,7 +1425,8 @@ retry_lookup:
                err = splice_dentry(&req->r_dentry, in);
                if (err < 0)
                        goto done;
-       } else if (rinfo->head->is_dentry) {
+       } else if (rinfo->head->is_dentry &&
+                  !d_name_cmp(req->r_dentry, rinfo->dname, rinfo->dname_len)) {
                struct ceph_vino *ptvino = NULL;
 
                if ((le32_to_cpu(rinfo->diri.in->cap.caps) & CEPH_CAP_FILE_SHARED) ||
index 21c33ed048ed7095aa2347159ab472d8550b9721..9049c2a3e972f499ea1371e8c4b8112ead98a6e3 100644 (file)
@@ -1414,6 +1414,15 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                        list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
                        ci->i_prealloc_cap_flush = NULL;
                }
+
+               if (drop &&
+                   ci->i_wrbuffer_ref_head == 0 &&
+                   ci->i_wr_ref == 0 &&
+                   ci->i_dirty_caps == 0 &&
+                   ci->i_flushing_caps == 0) {
+                       ceph_put_snap_context(ci->i_head_snapc);
+                       ci->i_head_snapc = NULL;
+               }
        }
        spin_unlock(&ci->i_ceph_lock);
        while (!list_empty(&to_remove)) {
@@ -2161,10 +2170,39 @@ retry:
        return path;
 }
 
+/* Duplicate the dentry->d_name.name safely */
+static int clone_dentry_name(struct dentry *dentry, const char **ppath,
+                            int *ppathlen)
+{
+       u32 len;
+       char *name;
+
+retry:
+       len = READ_ONCE(dentry->d_name.len);
+       name = kmalloc(len + 1, GFP_NOFS);
+       if (!name)
+               return -ENOMEM;
+
+       spin_lock(&dentry->d_lock);
+       if (dentry->d_name.len != len) {
+               spin_unlock(&dentry->d_lock);
+               kfree(name);
+               goto retry;
+       }
+       memcpy(name, dentry->d_name.name, len);
+       spin_unlock(&dentry->d_lock);
+
+       name[len] = '\0';
+       *ppath = name;
+       *ppathlen = len;
+       return 0;
+}
+
 static int build_dentry_path(struct dentry *dentry, struct inode *dir,
                             const char **ppath, int *ppathlen, u64 *pino,
-                            int *pfreepath)
+                            bool *pfreepath, bool parent_locked)
 {
+       int ret;
        char *path;
 
        rcu_read_lock();
@@ -2173,8 +2211,15 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
        if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
                *pino = ceph_ino(dir);
                rcu_read_unlock();
-               *ppath = dentry->d_name.name;
-               *ppathlen = dentry->d_name.len;
+               if (parent_locked) {
+                       *ppath = dentry->d_name.name;
+                       *ppathlen = dentry->d_name.len;
+               } else {
+                       ret = clone_dentry_name(dentry, ppath, ppathlen);
+                       if (ret)
+                               return ret;
+                       *pfreepath = true;
+               }
                return 0;
        }
        rcu_read_unlock();
@@ -2182,13 +2227,13 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
        if (IS_ERR(path))
                return PTR_ERR(path);
        *ppath = path;
-       *pfreepath = 1;
+       *pfreepath = true;
        return 0;
 }
 
 static int build_inode_path(struct inode *inode,
                            const char **ppath, int *ppathlen, u64 *pino,
-                           int *pfreepath)
+                           bool *pfreepath)
 {
        struct dentry *dentry;
        char *path;
@@ -2204,7 +2249,7 @@ static int build_inode_path(struct inode *inode,
        if (IS_ERR(path))
                return PTR_ERR(path);
        *ppath = path;
-       *pfreepath = 1;
+       *pfreepath = true;
        return 0;
 }
 
@@ -2215,7 +2260,7 @@ static int build_inode_path(struct inode *inode,
 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
                                  struct inode *rdiri, const char *rpath,
                                  u64 rino, const char **ppath, int *pathlen,
-                                 u64 *ino, int *freepath)
+                                 u64 *ino, bool *freepath, bool parent_locked)
 {
        int r = 0;
 
@@ -2225,7 +2270,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
                     ceph_snap(rinode));
        } else if (rdentry) {
                r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
-                                       freepath);
+                                       freepath, parent_locked);
                dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
                     *ppath);
        } else if (rpath || rino) {
@@ -2251,7 +2296,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
        const char *path2 = NULL;
        u64 ino1 = 0, ino2 = 0;
        int pathlen1 = 0, pathlen2 = 0;
-       int freepath1 = 0, freepath2 = 0;
+       bool freepath1 = false, freepath2 = false;
        int len;
        u16 releases;
        void *p, *end;
@@ -2259,16 +2304,19 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
 
        ret = set_request_path_attr(req->r_inode, req->r_dentry,
                              req->r_parent, req->r_path1, req->r_ino1.ino,
-                             &path1, &pathlen1, &ino1, &freepath1);
+                             &path1, &pathlen1, &ino1, &freepath1,
+                             test_bit(CEPH_MDS_R_PARENT_LOCKED,
+                                       &req->r_req_flags));
        if (ret < 0) {
                msg = ERR_PTR(ret);
                goto out;
        }
 
+       /* If r_old_dentry is set, then assume that its parent is locked */
        ret = set_request_path_attr(NULL, req->r_old_dentry,
                              req->r_old_dentry_dir,
                              req->r_path2, req->r_ino2.ino,
-                             &path2, &pathlen2, &ino2, &freepath2);
+                             &path2, &pathlen2, &ino2, &freepath2, true);
        if (ret < 0) {
                msg = ERR_PTR(ret);
                goto out_free1;
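
clone_dentry_name() above uses an optimistic read-allocate-recheck loop: read the length without the lock, allocate, then take the lock and re-check before copying, retrying if a concurrent rename changed the name. A self-contained sketch of the same loop, with a pthread mutex standing in for d_lock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct name {
        pthread_mutex_t lock;   /* stands in for dentry->d_lock */
        size_t len;
        char str[64];
};

static char *clone_name(struct name *n)
{
        char *copy;
        size_t len;

retry:
        len = n->len;                   /* unlocked read (READ_ONCE in the kernel) */
        copy = malloc(len + 1);
        if (!copy)
                return NULL;

        pthread_mutex_lock(&n->lock);
        if (n->len != len) {            /* renamed while we allocated */
                pthread_mutex_unlock(&n->lock);
                free(copy);
                goto retry;
        }
        memcpy(copy, n->str, len);
        pthread_mutex_unlock(&n->lock);

        copy[len] = '\0';
        return copy;
}

int main(void)
{
        struct name n = { PTHREAD_MUTEX_INITIALIZER, 5, "hello" };
        char *copy = clone_name(&n);

        printf("%s\n", copy);
        free(copy);
        return 0;
}
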
index 89aa37fa0f84c55fe3324e50b554f6fcef5b5be5..b26e12cd8ec3317f44e04baaca375826a974e632 100644 (file)
@@ -572,7 +572,12 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
        old_snapc = NULL;
 
 update_snapc:
-       if (ci->i_head_snapc) {
+       if (ci->i_wrbuffer_ref_head == 0 &&
+           ci->i_wr_ref == 0 &&
+           ci->i_dirty_caps == 0 &&
+           ci->i_flushing_caps == 0) {
+               ci->i_head_snapc = NULL;
+       } else {
                ci->i_head_snapc = ceph_get_snap_context(new_snapc);
                dout(" new snapc is %p\n", new_snapc);
        }
index 217276b8b942f59d96fa74edc9d119402d3c7031..a05bf1d6e1d04143da40126e0a07ea927347001e 100644 (file)
@@ -559,6 +559,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
                        tcon->ses->server->echo_interval / HZ);
        if (tcon->snapshot_time)
                seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
+       if (tcon->handle_timeout)
+               seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
        /* convert actimeo and display it in seconds */
        seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
 
@@ -1008,7 +1010,7 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
        unsigned int xid;
        int rc;
 
-       if (remap_flags & ~REMAP_FILE_ADVISORY)
+       if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
                return -EINVAL;
 
        cifs_dbg(FYI, "clone range\n");
index 142164ef1f05fe7befc2ad5df61409336b422fb7..5c0298b9998fc326795c435f6050c3563bdf09f4 100644 (file)
@@ -150,5 +150,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.18"
+#define CIFS_VERSION   "2.19"
 #endif                         /* _CIFSFS_H */
index 38feae812b4704b315ee3adfe2a1eaa2c4740e45..585ad3207cb120a34c3da24e418dd20a6daf04cf 100644 (file)
  */
 #define CIFS_MAX_ACTIMEO (1 << 30)
 
+/*
+ * Max persistent and resilient handle timeout (milliseconds).
+ * Windows durable max was 960000 (16 minutes)
+ */
+#define SMB3_MAX_HANDLE_TIMEOUT 960000
+
 /*
  * MAX_REQ is the maximum number of requests that WE will send
  * on one socket concurrently.
@@ -586,6 +592,7 @@ struct smb_vol {
        struct nls_table *local_nls;
        unsigned int echo_interval; /* echo interval in secs */
        __u64 snapshot_time; /* needed for timewarp tokens */
+       __u32 handle_timeout; /* persistent and durable handle timeout in ms */
        unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */
 };
 
@@ -1058,6 +1065,7 @@ struct cifs_tcon {
        __u32 vol_serial_number;
        __le64 vol_create_time;
        __u64 snapshot_time; /* for timewarp tokens - timestamp of snapshot */
+       __u32 handle_timeout; /* persistent and durable handle timeout in ms */
        __u32 ss_flags;         /* sector size flags */
        __u32 perf_sector_size; /* best sector size for perf */
        __u32 max_chunks;
@@ -1325,6 +1333,7 @@ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
 }
 
 struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
+void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler);
 void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
 
 #define CIFS_CACHE_READ_FLG    1
@@ -1847,6 +1856,7 @@ GLOBAL_EXTERN spinlock_t gidsidlock;
 #endif /* CONFIG_CIFS_ACL */
 
 void cifs_oplock_break(struct work_struct *work);
+void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
 
 extern const struct slow_work_ops cifs_oplock_break_ops;
 extern struct workqueue_struct *cifsiod_wq;
index a8e9738db691294736105bf8a0cd03772a9a2447..4c0e44489f21497670131b5a889ae6f4eb0b843e 100644 (file)
@@ -103,7 +103,7 @@ enum {
        Opt_cruid, Opt_gid, Opt_file_mode,
        Opt_dirmode, Opt_port,
        Opt_blocksize, Opt_rsize, Opt_wsize, Opt_actimeo,
-       Opt_echo_interval, Opt_max_credits,
+       Opt_echo_interval, Opt_max_credits, Opt_handletimeout,
        Opt_snapshot,
 
        /* Mount options which take string value */
@@ -208,6 +208,7 @@ static const match_table_t cifs_mount_option_tokens = {
        { Opt_rsize, "rsize=%s" },
        { Opt_wsize, "wsize=%s" },
        { Opt_actimeo, "actimeo=%s" },
+       { Opt_handletimeout, "handletimeout=%s" },
        { Opt_echo_interval, "echo_interval=%s" },
        { Opt_max_credits, "max_credits=%s" },
        { Opt_snapshot, "snapshot=%s" },
@@ -1619,6 +1620,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
 
        vol->actimeo = CIFS_DEF_ACTIMEO;
 
+       /* Most clients set timeout to 0, allowing the server to use its default */
+       vol->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */
+
        /* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */
        vol->ops = &smb30_operations;
        vol->vals = &smbdefault_values;
@@ -2017,6 +2021,18 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                                goto cifs_parse_mount_err;
                        }
                        break;
+               case Opt_handletimeout:
+                       if (get_option_ul(args, &option)) {
+                               cifs_dbg(VFS, "%s: Invalid handletimeout value\n",
+                                        __func__);
+                               goto cifs_parse_mount_err;
+                       }
+                       vol->handle_timeout = option;
+                       if (vol->handle_timeout > SMB3_MAX_HANDLE_TIMEOUT) {
+                               cifs_dbg(VFS, "Invalid handle cache timeout, longer than 16 minutes\n");
+                               goto cifs_parse_mount_err;
+                       }
+                       break;
                case Opt_echo_interval:
                        if (get_option_ul(args, &option)) {
                                cifs_dbg(VFS, "%s: Invalid echo interval value\n",
@@ -3183,6 +3199,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info)
                return 0;
        if (tcon->snapshot_time != volume_info->snapshot_time)
                return 0;
+       if (tcon->handle_timeout != volume_info->handle_timeout)
+               return 0;
        return 1;
 }
 
@@ -3297,6 +3315,16 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
                        tcon->snapshot_time = volume_info->snapshot_time;
        }
 
+       if (volume_info->handle_timeout) {
+               if (ses->server->vals->protocol_id == 0) {
+                       cifs_dbg(VFS,
+                            "Use SMB2.1 or later for handle timeout option\n");
+                       rc = -EOPNOTSUPP;
+                       goto out_fail;
+               } else
+                       tcon->handle_timeout = volume_info->handle_timeout;
+       }
+
        tcon->ses = ses;
        if (volume_info->password) {
                tcon->password = kstrdup(volume_info->password, GFP_KERNEL);
index 2a6d20c0ce0288d37ad2405424200be3fefaa9f4..7037a137fa5330c807da19a91acd054d76ad031a 100644 (file)
@@ -360,12 +360,30 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
        return cifs_file;
 }
 
-/*
- * Release a reference on the file private data. This may involve closing
- * the filehandle out on the server. Must be called without holding
- * tcon->open_file_lock and cifs_file->file_info_lock.
+/**
+ * cifsFileInfo_put - release a reference to file private data
+ *
+ * Always potentially wait for oplock handler. See _cifsFileInfo_put().
  */
 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+{
+       _cifsFileInfo_put(cifs_file, true);
+}
+
+/**
+ * _cifsFileInfo_put - release a reference to file private data
+ *
+ * This may involve closing the filehandle @cifs_file out on the
+ * server. Must be called without holding tcon->open_file_lock and
+ * cifs_file->file_info_lock.
+ *
+ * If @wait_oplock_handler is true and we are releasing the last
+ * reference, wait for any running oplock break handler of the file
+ * and cancel any pending one. If calling this function from the
+ * oplock break handler, you need to pass false.
+ */
+void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
 {
        struct inode *inode = d_inode(cifs_file->dentry);
        struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
@@ -414,7 +432,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 
        spin_unlock(&tcon->open_file_lock);
 
-       oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
+       oplock_break_cancelled = wait_oplock_handler ?
+               cancel_work_sync(&cifs_file->oplock_break) : false;
 
        if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
                struct TCP_Server_Info *server = tcon->ses->server;
@@ -2632,43 +2651,56 @@ cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
        struct TCP_Server_Info *server =
                tlink_tcon(wdata->cfile->tlink)->ses->server;
 
-       /*
-        * Wait for credits to resend this wdata.
-        * Note: we are attempting to resend the whole wdata not in segments
-        */
        do {
-               rc = server->ops->wait_mtu_credits(server, wdata->bytes, &wsize,
-                                                  &credits);
+               if (wdata->cfile->invalidHandle) {
+                       rc = cifs_reopen_file(wdata->cfile, false);
+                       if (rc == -EAGAIN)
+                               continue;
+                       else if (rc)
+                               break;
+               }
 
-               if (rc)
-                       goto out;
 
-               if (wsize < wdata->bytes) {
-                       add_credits_and_wake_if(server, &credits, 0);
-                       msleep(1000);
-               }
-       } while (wsize < wdata->bytes);
+               /*
+                * Wait for credits to resend this wdata.
+                * Note: we are attempting to resend the whole wdata not in
+                * segments
+                */
+               do {
+                       rc = server->ops->wait_mtu_credits(server, wdata->bytes,
+                                               &wsize, &credits);
+                       if (rc)
+                               goto fail;
+
+                       if (wsize < wdata->bytes) {
+                               add_credits_and_wake_if(server, &credits, 0);
+                               msleep(1000);
+                       }
+               } while (wsize < wdata->bytes);
+               wdata->credits = credits;
 
-       wdata->credits = credits;
-       rc = -EAGAIN;
-       while (rc == -EAGAIN) {
-               rc = 0;
-               if (wdata->cfile->invalidHandle)
-                       rc = cifs_reopen_file(wdata->cfile, false);
-               if (!rc)
-                       rc = server->ops->async_writev(wdata,
+               rc = adjust_credits(server, &wdata->credits, wdata->bytes);
+
+               if (!rc) {
+                       if (wdata->cfile->invalidHandle)
+                               rc = -EAGAIN;
+                       else
+                               rc = server->ops->async_writev(wdata,
                                        cifs_uncached_writedata_release);
-       }
+               }
 
-       if (!rc) {
-               list_add_tail(&wdata->list, wdata_list);
-               return 0;
-       }
+               /* If the write was successfully sent, we are done */
+               if (!rc) {
+                       list_add_tail(&wdata->list, wdata_list);
+                       return 0;
+               }
 
-       add_credits_and_wake_if(server, &wdata->credits, 0);
-out:
-       kref_put(&wdata->refcount, cifs_uncached_writedata_release);
+               /* Roll back credits and retry if needed */
+               add_credits_and_wake_if(server, &wdata->credits, 0);
+       } while (rc == -EAGAIN);
 
+fail:
+       kref_put(&wdata->refcount, cifs_uncached_writedata_release);
        return rc;
 }
 
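Restructured this way, the resend path is one loop: reopen a stale handle, reserve credits, send, and on -EAGAIN roll the credits back and start over. A stubbed-out sketch of that control flow (all helpers are fakes that succeed on the second send attempt):

#include <stdio.h>

#define EAGAIN 11

static int attempts;

/* stubs standing in for reopen, credit accounting and the async send */
static int reopen_file(void)          { return 0; }
static int reserve_credits(void)      { return 0; }
static void release_credits(void)     { }
static int send_request(void)         { return ++attempts < 2 ? -EAGAIN : 0; }

static int resend(int handle_invalid)
{
        int rc;

        do {
                if (handle_invalid) {
                        rc = reopen_file();
                        if (rc == -EAGAIN)
                                continue;       /* retry the reopen */
                        else if (rc)
                                break;          /* hard failure */
                        handle_invalid = 0;
                }

                rc = reserve_credits();
                if (rc)
                        break;

                rc = send_request();
                if (!rc)
                        return 0;               /* sent, we are done */

                release_credits();              /* roll back, maybe retry */
        } while (rc == -EAGAIN);

        return rc;
}

int main(void)
{
        int rc = resend(1);

        printf("rc=%d after %d send attempts\n", rc, attempts);
        return 0;
}
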
@@ -2845,7 +2877,6 @@ static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
        struct cifs_tcon *tcon;
        struct cifs_sb_info *cifs_sb;
        struct dentry *dentry = ctx->cfile->dentry;
-       unsigned int i;
        int rc;
 
        tcon = tlink_tcon(ctx->cfile->tlink);
@@ -2896,12 +2927,12 @@ restart_loop:
                                                wdata->bytes, &tmp_from,
                                                ctx->cfile, cifs_sb, &tmp_list,
                                                ctx);
+
+                                       kref_put(&wdata->refcount,
+                                               cifs_uncached_writedata_release);
                                }
 
                                list_splice(&tmp_list, &ctx->list);
-
-                               kref_put(&wdata->refcount,
-                                        cifs_uncached_writedata_release);
                                goto restart_loop;
                        }
                }
@@ -2909,10 +2940,6 @@ restart_loop:
                kref_put(&wdata->refcount, cifs_uncached_writedata_release);
        }
 
-       if (!ctx->direct_io)
-               for (i = 0; i < ctx->npages; i++)
-                       put_page(ctx->bv[i].bv_page);
-
        cifs_stats_bytes_written(tcon, ctx->total_len);
        set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
 
@@ -3348,44 +3375,55 @@ static int cifs_resend_rdata(struct cifs_readdata *rdata,
        struct TCP_Server_Info *server =
                tlink_tcon(rdata->cfile->tlink)->ses->server;
 
-       /*
-        * Wait for credits to resend this rdata.
-        * Note: we are attempting to resend the whole rdata not in segments
-        */
        do {
-               rc = server->ops->wait_mtu_credits(server, rdata->bytes,
+               if (rdata->cfile->invalidHandle) {
+                       rc = cifs_reopen_file(rdata->cfile, true);
+                       if (rc == -EAGAIN)
+                               continue;
+                       else if (rc)
+                               break;
+               }
+
+               /*
+                * Wait for credits to resend this rdata.
+                * Note: we are attempting to resend the whole rdata not in
+                * segments
+                */
+               do {
+                       rc = server->ops->wait_mtu_credits(server, rdata->bytes,
                                                &rsize, &credits);
 
-               if (rc)
-                       goto out;
+                       if (rc)
+                               goto fail;
 
-               if (rsize < rdata->bytes) {
-                       add_credits_and_wake_if(server, &credits, 0);
-                       msleep(1000);
-               }
-       } while (rsize < rdata->bytes);
+                       if (rsize < rdata->bytes) {
+                               add_credits_and_wake_if(server, &credits, 0);
+                               msleep(1000);
+                       }
+               } while (rsize < rdata->bytes);
+               rdata->credits = credits;
 
-       rdata->credits = credits;
-       rc = -EAGAIN;
-       while (rc == -EAGAIN) {
-               rc = 0;
-               if (rdata->cfile->invalidHandle)
-                       rc = cifs_reopen_file(rdata->cfile, true);
-               if (!rc)
-                       rc = server->ops->async_readv(rdata);
-       }
+               rc = adjust_credits(server, &rdata->credits, rdata->bytes);
+               if (!rc) {
+                       if (rdata->cfile->invalidHandle)
+                               rc = -EAGAIN;
+                       else
+                               rc = server->ops->async_readv(rdata);
+               }
 
-       if (!rc) {
-               /* Add to aio pending list */
-               list_add_tail(&rdata->list, rdata_list);
-               return 0;
-       }
+               /* If the read was successfully sent, we are done */
+               if (!rc) {
+                       /* Add to aio pending list */
+                       list_add_tail(&rdata->list, rdata_list);
+                       return 0;
+               }
 
-       add_credits_and_wake_if(server, &rdata->credits, 0);
-out:
-       kref_put(&rdata->refcount,
-               cifs_uncached_readdata_release);
+               /* Roll back credits and retry if needed */
+               add_credits_and_wake_if(server, &rdata->credits, 0);
+       } while (rc == -EAGAIN);
 
+fail:
+       kref_put(&rdata->refcount, cifs_uncached_readdata_release);
        return rc;
 }
 
@@ -3539,7 +3577,6 @@ collect_uncached_read_data(struct cifs_aio_ctx *ctx)
        struct iov_iter *to = &ctx->iter;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
-       unsigned int i;
        int rc;
 
        tcon = tlink_tcon(ctx->cfile->tlink);
@@ -3623,15 +3660,8 @@ again:
                kref_put(&rdata->refcount, cifs_uncached_readdata_release);
        }
 
-       if (!ctx->direct_io) {
-               for (i = 0; i < ctx->npages; i++) {
-                       if (ctx->should_dirty)
-                               set_page_dirty(ctx->bv[i].bv_page);
-                       put_page(ctx->bv[i].bv_page);
-               }
-
+       if (!ctx->direct_io)
                ctx->total_len = ctx->len - iov_iter_count(to);
-       }
 
        /* mask nodata case */
        if (rc == -ENODATA)
@@ -4579,6 +4609,7 @@ void cifs_oplock_break(struct work_struct *work)
                                                             cinode);
                cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
        }
+       _cifsFileInfo_put(cfile, false /* do not wait for ourself */);
        cifs_done_oplock_break(cinode);
 }
 
index 53fdb5df0d2ebd67b2687b76d98441ee7faa41be..538fd7d807e476f9998820b2abc93ae7f6a7c127 100644 (file)
@@ -1735,6 +1735,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
        if (rc == 0 || rc != -EBUSY)
                goto do_rename_exit;
 
+       /* Don't fall back to using SMB on SMB 2+ mount */
+       if (server->vals->protocol_id != 0)
+               goto do_rename_exit;
+
        /* open-file renames don't work across directories */
        if (to_dentry->d_parent != from_dentry->d_parent)
                goto do_rename_exit;
index bee203055b300b1d13a284e9571225f7ba4888c8..0dc6f08020acbc81dbc99966cb229842c688acb2 100644 (file)
@@ -501,8 +501,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
                                           CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
                                           &pCifsInode->flags);
 
-                               queue_work(cifsoplockd_wq,
-                                          &netfile->oplock_break);
+                               cifs_queue_oplock_break(netfile);
                                netfile->oplock_break_cancelled = false;
 
                                spin_unlock(&tcon->open_file_lock);
@@ -607,6 +606,28 @@ void cifs_put_writer(struct cifsInodeInfo *cinode)
        spin_unlock(&cinode->writers_lock);
 }
 
+/**
+ * cifs_queue_oplock_break - queue the oplock break handler for cfile
+ *
+ * This function is called from the demultiplex thread when it
+ * receives an oplock break for @cfile.
+ *
+ * Assumes the tcon->open_file_lock is held.
+ * Assumes cfile->file_info_lock is NOT held.
+ */
+void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
+{
+       /*
+        * Bump the handle refcount now while we hold the
+        * open_file_lock to enforce the validity of it for the oplock
+        * break handler. The matching put is done at the end of the
+        * handler.
+        */
+       cifsFileInfo_get(cfile);
+
+       queue_work(cifsoplockd_wq, &cfile->oplock_break);
+}
+
 void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
 {
        clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
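
cifs_queue_oplock_break() above pins the file before the work is queued, and the matching put happens at the end of cifs_oplock_break(). The lifetime rule, reduced to its essentials (a plain counter instead of kernel refcounting, and the work run inline rather than queued):

#include <stdio.h>

struct file_info {
        int refcount;           /* the real code uses proper refcounting */
};

static void get_ref(struct file_info *f)
{
        f->refcount++;
}

static void put_ref(struct file_info *f)
{
        if (--f->refcount == 0)
                printf("last reference dropped, object freed\n");
}

static void oplock_break_handler(struct file_info *f)
{
        printf("handling oplock break, refcount=%d\n", f->refcount);
        put_ref(f);             /* matching put at the end of the handler */
}

static void queue_oplock_break(struct file_info *f)
{
        get_ref(f);             /* pin the object for the queued work */
        oplock_break_handler(f); /* a real version would queue_work() here */
}

int main(void)
{
        struct file_info f = { .refcount = 1 };

        queue_oplock_break(&f);
        put_ref(&f);            /* drop the opener's reference */
        return 0;
}
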
@@ -768,6 +789,11 @@ cifs_aio_ctx_alloc(void)
 {
        struct cifs_aio_ctx *ctx;
 
+       /*
+        * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
+        * to false so that we know when we have to unreference pages within
+        * cifs_aio_ctx_release()
+        */
        ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;
@@ -786,7 +812,23 @@ cifs_aio_ctx_release(struct kref *refcount)
                                        struct cifs_aio_ctx, refcount);
 
        cifsFileInfo_put(ctx->cfile);
-       kvfree(ctx->bv);
+
+       /*
+        * ctx->bv is only set if setup_aio_ctx_iter() was called successfully,
+        * which means that iov_iter_get_pages() succeeded and thus that
+        * we have taken a reference on the pages.
+        */
+       if (ctx->bv) {
+               unsigned i;
+
+               for (i = 0; i < ctx->npages; i++) {
+                       if (ctx->should_dirty)
+                               set_page_dirty(ctx->bv[i].bv_page);
+                       put_page(ctx->bv[i].bv_page);
+               }
+               kvfree(ctx->bv);
+       }
+
        kfree(ctx);
 }
 
index b204e84b87fb52d938dc138379f7877ffb2ba74a..54bffb2a1786d00c5becdb9c2c275c0aa5f87c99 100644 (file)
@@ -68,13 +68,15 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
 
 
         if (oparms->tcon->use_resilient) {
-               nr_ioctl_req.Timeout = 0; /* use server default (120 seconds) */
+               /* default timeout is 0, servers pick default (120 seconds) */
+               nr_ioctl_req.Timeout =
+                       cpu_to_le32(oparms->tcon->handle_timeout);
                nr_ioctl_req.Reserved = 0;
                rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid,
                        fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY,
                        true /* is_fsctl */,
                        (char *)&nr_ioctl_req, sizeof(nr_ioctl_req),
-                       NULL, NULL /* no return info */);
+                       CIFSMaxBufSize, NULL, NULL /* no return info */);
                if (rc == -EOPNOTSUPP) {
                        cifs_dbg(VFS,
                             "resiliency not supported by server, disabling\n");
index 924269cec1352f75a592ba81e9865bbb030caafe..e32c264e3adbb847653911499ef635c586032c8a 100644 (file)
@@ -1036,7 +1036,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
        {STATUS_UNFINISHED_CONTEXT_DELETED, -EIO,
        "STATUS_UNFINISHED_CONTEXT_DELETED"},
        {STATUS_NO_TGT_REPLY, -EIO, "STATUS_NO_TGT_REPLY"},
-       {STATUS_OBJECTID_NOT_FOUND, -EIO, "STATUS_OBJECTID_NOT_FOUND"},
+       /* Note that ENOATTR and ENODATA are the same errno */
+       {STATUS_OBJECTID_NOT_FOUND, -ENODATA, "STATUS_OBJECTID_NOT_FOUND"},
        {STATUS_NO_IP_ADDRESSES, -EIO, "STATUS_NO_IP_ADDRESSES"},
        {STATUS_WRONG_CREDENTIAL_HANDLE, -EIO,
        "STATUS_WRONG_CREDENTIAL_HANDLE"},
index 0e3570e40ff8e8d233389063290ccdecdfb44a25..e311f58dc1c82809de0283e434a9aff09356825f 100644 (file)
@@ -555,7 +555,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
                        clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
                                  &cinode->flags);
 
-               queue_work(cifsoplockd_wq, &cfile->oplock_break);
+               cifs_queue_oplock_break(cfile);
                kfree(lw);
                return true;
        }
@@ -712,8 +712,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
                                           CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
                                           &cinode->flags);
                                spin_unlock(&cfile->file_info_lock);
-                               queue_work(cifsoplockd_wq,
-                                          &cfile->oplock_break);
+
+                               cifs_queue_oplock_break(cfile);
 
                                spin_unlock(&tcon->open_file_lock);
                                spin_unlock(&cifs_tcp_ses_lock);
index 1022a3771e140d819e767ba5a75677a88d65f911..c36ff0d1fe2a8b7b2668466464fc9da9e45a774f 100644 (file)
@@ -581,7 +581,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
        rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                        FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
                        NULL /* no data input */, 0 /* no data input */,
-                       (char **)&out_buf, &ret_data_len);
+                       CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
        if (rc == -EOPNOTSUPP) {
                cifs_dbg(FYI,
                         "server does not support query network interfaces\n");
@@ -717,32 +717,28 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
        oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId);
 #endif /* CIFS_DEBUG2 */
 
-       if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
-               oplock = smb2_parse_lease_state(server, o_rsp,
-                                               &oparms.fid->epoch,
-                                               oparms.fid->lease_key);
-       else
-               goto oshr_exit;
-
-
        memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
        tcon->crfid.tcon = tcon;
        tcon->crfid.is_valid = true;
        kref_init(&tcon->crfid.refcount);
-       kref_get(&tcon->crfid.refcount);
 
+       if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
+               kref_get(&tcon->crfid.refcount);
+               oplock = smb2_parse_lease_state(server, o_rsp,
+                                               &oparms.fid->epoch,
+                                               oparms.fid->lease_key);
+       } else
+               goto oshr_exit;
 
        qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
        if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
                goto oshr_exit;
-       rc = smb2_validate_and_copy_iov(
+       if (!smb2_validate_and_copy_iov(
                                le16_to_cpu(qi_rsp->OutputBufferOffset),
                                sizeof(struct smb2_file_all_info),
                                &rsp_iov[1], sizeof(struct smb2_file_all_info),
-                               (char *)&tcon->crfid.file_all_info);
-       if (rc)
-               goto oshr_exit;
-       tcon->crfid.file_all_info_is_valid = 1;
+                               (char *)&tcon->crfid.file_all_info))
+               tcon->crfid.file_all_info_is_valid = 1;
 
  oshr_exit:
        mutex_unlock(&tcon->crfid.fid_mutex);
@@ -1299,7 +1295,7 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
 
        rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
                        FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
-                       NULL, 0 /* no input */,
+                       NULL, 0 /* no input */, CIFSMaxBufSize,
                        (char **)&res_key, &ret_data_len);
 
        if (rc) {
@@ -1404,7 +1400,7 @@ smb2_ioctl_query_info(const unsigned int xid,
                        rc = SMB2_ioctl_init(tcon, &rqst[1],
                                             COMPOUND_FID, COMPOUND_FID,
                                             qi.info_type, true, NULL,
-                                            0);
+                                            0, CIFSMaxBufSize);
                }
        } else if (qi.flags == PASSTHRU_QUERY_INFO) {
                memset(&qi_iov, 0, sizeof(qi_iov));
@@ -1532,8 +1528,8 @@ smb2_copychunk_range(const unsigned int xid,
                rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
                        trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
                        true /* is_fsctl */, (char *)pcchunk,
-                       sizeof(struct copychunk_ioctl), (char **)&retbuf,
-                       &ret_data_len);
+                       sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
+                       (char **)&retbuf, &ret_data_len);
                if (rc == 0) {
                        if (ret_data_len !=
                                        sizeof(struct copychunk_ioctl_rsp)) {
@@ -1693,7 +1689,7 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
        rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
                        cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
                        true /* is_fctl */,
-                       &setsparse, 1, NULL, NULL);
+                       &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
        if (rc) {
                tcon->broken_sparse_sup = true;
                cifs_dbg(FYI, "set sparse rc = %d\n", rc);
@@ -1766,7 +1762,7 @@ smb2_duplicate_extents(const unsigned int xid,
                        true /* is_fsctl */,
                        (char *)&dup_ext_buf,
                        sizeof(struct duplicate_extents_to_file),
-                       NULL,
+                       CIFSMaxBufSize, NULL,
                        &ret_data_len);
 
        if (ret_data_len > 0)
@@ -1801,7 +1797,7 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
                        true /* is_fsctl */,
                        (char *)&integr_info,
                        sizeof(struct fsctl_set_integrity_information_req),
-                       NULL,
+                       CIFSMaxBufSize, NULL,
                        &ret_data_len);
 
 }
@@ -1809,6 +1805,8 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
 /* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
 #define GMT_TOKEN_SIZE 50
 
+#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
+
 /*
  * Input buffer contains (empty) struct smb_snapshot array with size filled in
  * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
@@ -1820,13 +1818,29 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
        char *retbuf = NULL;
        unsigned int ret_data_len = 0;
        int rc;
+       u32 max_response_size;
        struct smb_snapshot_array snapshot_in;
 
+       if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
+               return -EFAULT;
+
+       /*
+        * Note that for snapshot queries servers like Azure expect the first
+        * query to be minimal size (just used to get the number/size of
+        * previous versions), so the response size must be specified as
+        * EXACTLY sizeof(struct snapshot_array), which is 16 when rounded up
+        * to a multiple of eight bytes.
+        */
+       if (ret_data_len == 0)
+               max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
+       else
+               max_response_size = CIFSMaxBufSize;
+
        rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
                        cfile->fid.volatile_fid,
                        FSCTL_SRV_ENUMERATE_SNAPSHOTS,
                        true /* is_fsctl */,
-                       NULL, 0 /* no input data */,
+                       NULL, 0 /* no input data */, max_response_size,
                        (char **)&retbuf,
                        &ret_data_len);
        cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
@@ -2304,7 +2318,7 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
                rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                                FSCTL_DFS_GET_REFERRALS,
                                true /* is_fsctl */,
-                               (char *)dfs_req, dfs_req_size,
+                               (char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
                                (char **)&dfs_rsp, &dfs_rsp_size);
        } while (rc == -EAGAIN);
 
@@ -2375,6 +2389,8 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
 
        rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov,
                       &resp_buftype);
+       if (!rc)
+               SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
        if (!rc || !err_iov.iov_base) {
                rc = -ENOENT;
                goto free_path;
@@ -2658,7 +2674,8 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
        rc = SMB2_ioctl_init(tcon, &rqst[num++], cfile->fid.persistent_fid,
                             cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
                             true /* is_fctl */, (char *)&fsctl_buf,
-                            sizeof(struct file_zero_data_information));
+                            sizeof(struct file_zero_data_information),
+                            CIFSMaxBufSize);
        if (rc)
                goto zero_range_exit;
 
@@ -2735,7 +2752,8 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
        rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
                        cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
                        true /* is_fctl */, (char *)&fsctl_buf,
-                       sizeof(struct file_zero_data_information), NULL, NULL);
+                       sizeof(struct file_zero_data_information),
+                       CIFSMaxBufSize, NULL, NULL);
        free_xid(xid);
        return rc;
 }
index c399e09b76e62a7c733857075ff348b039e49a58..a37774a55f3aa1b8598ebd30b063fff67d4cb32f 100644 (file)
@@ -832,8 +832,11 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
                } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
                        /* ops set to 3.0 by default, so update */
                        ses->server->ops = &smb21_operations;
-               } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
+                       ses->server->vals = &smb21_values;
+               } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
                        ses->server->ops = &smb311_operations;
+                       ses->server->vals = &smb311_values;
+               }
        } else if (le16_to_cpu(rsp->DialectRevision) !=
                                ses->server->vals->protocol_id) {
                /* if requested single dialect ensure returned dialect matched */
@@ -1002,7 +1005,8 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
 
        rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
-               (char *)pneg_inbuf, inbuflen, (char **)&pneg_rsp, &rsplen);
+               (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
+               (char **)&pneg_rsp, &rsplen);
        if (rc == -EOPNOTSUPP) {
                /*
                 * Old Windows versions or Netapp SMB server can return
@@ -1628,9 +1632,16 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
        iov[1].iov_base = unc_path;
        iov[1].iov_len = unc_path_len;
 
-       /* 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1 */
+       /*
+        * 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1
+        * unless it is guest or anonymous user. See MS-SMB2 3.2.5.3.1
+        * (Samba servers don't always set the flag so also check if null user)
+        */
        if ((ses->server->dialect == SMB311_PROT_ID) &&
-           !smb3_encryption_required(tcon))
+           !smb3_encryption_required(tcon) &&
+           !(ses->session_flags &
+                   (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) &&
+           ((ses->user_name != NULL) || (ses->sectype == Kerberos)))
                req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
 
        memset(&rqst, 0, sizeof(struct smb_rqst));
@@ -1851,8 +1862,9 @@ add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
 }
 
 static struct create_durable_v2 *
-create_durable_v2_buf(struct cifs_fid *pfid)
+create_durable_v2_buf(struct cifs_open_parms *oparms)
 {
+       struct cifs_fid *pfid = oparms->fid;
        struct create_durable_v2 *buf;
 
        buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
@@ -1866,7 +1878,14 @@ create_durable_v2_buf(struct cifs_fid *pfid)
                                (struct create_durable_v2, Name));
        buf->ccontext.NameLength = cpu_to_le16(4);
 
-       buf->dcontext.Timeout = 0; /* Should this be configurable by workload */
+       /*
+        * NB: Handle timeout defaults to 0, which allows server to choose
+        * (most servers default to 120 seconds) and most clients default to 0.
+        * This can be overridden at mount ("handletimeout=") if the user wants
+        * a different persistent (or resilient) handle timeout for all
+        * opens on a particular SMB3 mount.
+        */
+       buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
        buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
        generate_random_uuid(buf->dcontext.CreateGuid);
        memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
@@ -1919,7 +1938,7 @@ add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
        struct smb2_create_req *req = iov[0].iov_base;
        unsigned int num = *num_iovec;
 
-       iov[num].iov_base = create_durable_v2_buf(oparms->fid);
+       iov[num].iov_base = create_durable_v2_buf(oparms);
        if (iov[num].iov_base == NULL)
                return -ENOMEM;
        iov[num].iov_len = sizeof(struct create_durable_v2);
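
Assuming the "handletimeout=" option referenced in the comment above takes its value in milliseconds, a mount requesting a 20 second persistent-handle timeout for every open on the share would look something like:

    mount -t cifs //server/share /mnt -o persistenthandles,handletimeout=20000
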
@@ -2471,7 +2490,8 @@ creat_exit:
 int
 SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
                u64 persistent_fid, u64 volatile_fid, u32 opcode,
-               bool is_fsctl, char *in_data, u32 indatalen)
+               bool is_fsctl, char *in_data, u32 indatalen,
+               __u32 max_response_size)
 {
        struct smb2_ioctl_req *req;
        struct kvec *iov = rqst->rq_iov;
@@ -2513,16 +2533,21 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
        req->OutputCount = 0; /* MBZ */
 
        /*
-        * Could increase MaxOutputResponse, but that would require more
-        * than one credit. Windows typically sets this smaller, but for some
+        * In most cases max_response_size is set to 16K (CIFSMaxBufSize).
+        * We could increase the default MaxOutputResponse, but that could
+        * require more credits. Windows typically sets this smaller, but for some
         * ioctls it may be useful to allow server to send more. No point
         * limiting what the server can send as long as it fits in one credit.
-        * Unfortunately - we can not handle more than CIFS_MAX_MSG_SIZE
-        * (by default, note that it can be overridden to make max larger)
-        * in responses (except for read responses which can be bigger.
-        * We may want to bump this limit up
+        * We cannot handle more than CIFS_MAX_BUF_SIZE yet, but may want
+        * to increase this limit in the future.
+        * Note that for snapshot queries servers like Azure expect the first
+        * query to be minimal size (just used to get the number/size of
+        * previous versions), so the response size must be specified as
+        * EXACTLY sizeof(struct snapshot_array), which is 16 when rounded up
+        * to a multiple of eight bytes.  Currently that is the only case
+        * where we set the max response size smaller.
         */
-       req->MaxOutputResponse = cpu_to_le32(CIFSMaxBufSize);
+       req->MaxOutputResponse = cpu_to_le32(max_response_size);
 
        if (is_fsctl)
                req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
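
For scale: an SMB2/SMB3 credit covers 64 KiB of payload, and the credit charge is (max(SendPayloadSize, Expected ResponsePayloadSize) - 1) / 65536 + 1 per MS-SMB2 3.1.5.2, so both the 16K CIFSMaxBufSize default and the 16-byte snapshot probe keep the ioctl at a single-credit charge, as the comment above assumes.
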
@@ -2543,13 +2568,14 @@ SMB2_ioctl_free(struct smb_rqst *rqst)
                cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
 }
 
+
 /*
  *     SMB2 IOCTL is used for both IOCTLs and FSCTLs
  */
 int
 SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
           u64 volatile_fid, u32 opcode, bool is_fsctl,
-          char *in_data, u32 indatalen,
+          char *in_data, u32 indatalen, u32 max_out_data_len,
           char **out_data, u32 *plen /* returned data len */)
 {
        struct smb_rqst rqst;
@@ -2586,8 +2612,8 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        rqst.rq_iov = iov;
        rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;
 
-       rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid,
-                            opcode, is_fsctl, in_data, indatalen);
+       rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid, opcode,
+                            is_fsctl, in_data, indatalen, max_out_data_len);
        if (rc)
                goto ioctl_exit;
 
@@ -2665,7 +2691,8 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
        rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
                        FSCTL_SET_COMPRESSION, true /* is_fsctl */,
                        (char *)&fsctl_input /* data input */,
-                       2 /* in data len */, &ret_data /* out data */, NULL);
+                       2 /* in data len */, CIFSMaxBufSize /* max out data */,
+                       &ret_data /* out data */, NULL);
 
        cifs_dbg(FYI, "set compression rc %d\n", rc);
 
@@ -3424,8 +3451,6 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
        rqst.rq_nvec = 1;
 
        rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
-       cifs_small_buf_release(req);
-
        rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
 
        if (rc) {
@@ -3441,12 +3466,15 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
                                    io_parms->tcon->tid, ses->Suid,
                                    io_parms->offset, 0);
                free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+               cifs_small_buf_release(req);
                return rc == -ENODATA ? 0 : rc;
        } else
                trace_smb3_read_done(xid, req->PersistentFileId,
                                    io_parms->tcon->tid, ses->Suid,
                                    io_parms->offset, io_parms->length);
 
+       cifs_small_buf_release(req);
+
        *nbytes = le32_to_cpu(rsp->DataLength);
        if ((*nbytes > CIFS_MAX_MSGSIZE) ||
            (*nbytes > io_parms->length)) {
@@ -3745,7 +3773,6 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
 
        rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst,
                            &resp_buftype, flags, &rsp_iov);
-       cifs_small_buf_release(req);
        rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
 
        if (rc) {
@@ -3763,6 +3790,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
                                     io_parms->offset, *nbytes);
        }
 
+       cifs_small_buf_release(req);
        free_rsp_buf(resp_buftype, rsp);
        return rc;
 }
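
The SMB2_read and SMB2_write hunks above are the same class of fix: req is still dereferenced by the trace points after cifs_send_recv() returns, so the small-buffer release has to move past the last use. Schematically (an ordering sketch, not the full function):

    rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
    if (rc)
            trace_smb3_read_err(..., req->PersistentFileId, ...); /* req still live */
    else
            trace_smb3_read_done(..., req->PersistentFileId, ...);
    cifs_small_buf_release(req);    /* only after the last req dereference */
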
index 3c32d0cfea69b0c7191336e5b38247de578ed63b..52df125e918984139b176a5f4101ae6da11c7e0f 100644 (file)
@@ -142,11 +142,12 @@ extern int SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
 extern void SMB2_open_free(struct smb_rqst *rqst);
 extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
                     u64 persistent_fid, u64 volatile_fid, u32 opcode,
-                    bool is_fsctl, char *in_data, u32 indatalen,
+                    bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen,
                     char **out_data, u32 *plen /* returned data len */);
 extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
                           u64 persistent_fid, u64 volatile_fid, u32 opcode,
-                          bool is_fsctl, char *in_data, u32 indatalen);
+                          bool is_fsctl, char *in_data, u32 indatalen,
+                          __u32 max_response_size);
 extern void SMB2_ioctl_free(struct smb_rqst *rqst);
 extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
                      u64 persistent_file_id, u64 volatile_file_id);
index fa226de48ef38c0c0696aca767c69cc2ac8dbdd8..99c4d799c24b63c1f16efba4cf5ab1341c9ae90b 100644 (file)
@@ -549,19 +549,19 @@ DECLARE_EVENT_CLASS(smb3_tcon_class,
                __field(unsigned int, xid)
                __field(__u32, tid)
                __field(__u64, sesid)
-               __field(const char *,  unc_name)
+               __string(name, unc_name)
                __field(int, rc)
        ),
        TP_fast_assign(
                __entry->xid = xid;
                __entry->tid = tid;
                __entry->sesid = sesid;
-               __entry->unc_name = unc_name;
+               __assign_str(name, unc_name);
                __entry->rc = rc;
        ),
        TP_printk("xid=%u sid=0x%llx tid=0x%x unc_name=%s rc=%d",
                __entry->xid, __entry->sesid, __entry->tid,
-               __entry->unc_name, __entry->rc)
+               __get_str(name), __entry->rc)
 )
 
 #define DEFINE_SMB3_TCON_EVENT(name)          \
index ca0671d55aa699df6723ffb897706b6579c68780..e5e54da1715f630cf1471e625e72045b6f31e112 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -33,6 +33,7 @@
 #include <linux/sizes.h>
 #include <linux/mmu_notifier.h>
 #include <linux/iomap.h>
+#include <asm/pgalloc.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
@@ -1407,7 +1408,9 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 {
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        unsigned long pmd_addr = vmf->address & PMD_MASK;
+       struct vm_area_struct *vma = vmf->vma;
        struct inode *inode = mapping->host;
+       pgtable_t pgtable = NULL;
        struct page *zero_page;
        spinlock_t *ptl;
        pmd_t pmd_entry;
@@ -1422,12 +1425,22 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
        *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
                        DAX_PMD | DAX_ZERO_PAGE, false);
 
+       if (arch_needs_pgtable_deposit()) {
+               pgtable = pte_alloc_one(vma->vm_mm);
+               if (!pgtable)
+                       return VM_FAULT_OOM;
+       }
+
        ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
        if (!pmd_none(*(vmf->pmd))) {
                spin_unlock(ptl);
                goto fallback;
        }
 
+       if (pgtable) {
+               pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
+               mm_inc_nr_ptes(vma->vm_mm);
+       }
        pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
        pmd_entry = pmd_mkhuge(pmd_entry);
        set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
@@ -1436,6 +1449,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
        return VM_FAULT_NOPAGE;
 
 fallback:
+       if (pgtable)
+               pte_free(vma->vm_mm, pgtable);
        trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
        return VM_FAULT_FALLBACK;
 }
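
For reference, the table deposited here is consumed on the teardown side: on architectures where arch_needs_pgtable_deposit() is true, the zap path withdraws and frees it again, along the lines of (a sketch built from the generic huge-page helpers, not code from this patch):

    ptl = pmd_lock(mm, pmd);
    pmdp_huge_clear_flush(vma, addr, pmd);
    pgtable = pgtable_trans_huge_withdraw(mm, pmd);
    mm_dec_nr_ptes(mm);
    pte_free(mm, pgtable);
    spin_unlock(ptl);

Without the deposit, that withdraw would underflow the per-mm page-table accounting that the hunk above increments with mm_inc_nr_ptes().
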
index 95b5e78c22b1e98811d3aca9c64c2c5deb54c6fe..f25daa207421c50cf38b1e4771ef5ab3332e9be1 100644 (file)
@@ -163,19 +163,24 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
        return 0;
 }
 
-static void debugfs_evict_inode(struct inode *inode)
+static void debugfs_i_callback(struct rcu_head *head)
 {
-       truncate_inode_pages_final(&inode->i_data);
-       clear_inode(inode);
+       struct inode *inode = container_of(head, struct inode, i_rcu);
        if (S_ISLNK(inode->i_mode))
                kfree(inode->i_link);
+       free_inode_nonrcu(inode);
+}
+
+static void debugfs_destroy_inode(struct inode *inode)
+{
+       call_rcu(&inode->i_rcu, debugfs_i_callback);
 }
 
 static const struct super_operations debugfs_super_operations = {
        .statfs         = simple_statfs,
        .remount_fs     = debugfs_remount,
        .show_options   = debugfs_show_options,
-       .evict_inode    = debugfs_evict_inode,
+       .destroy_inode  = debugfs_destroy_inode,
 };
 
 static void debugfs_release_dentry(struct dentry *dentry)
index a1ac7e9245ecc46efaceda18901f513922937376..75a5309f223151767aa951336dbd370e6776ec14 100644 (file)
@@ -384,7 +384,7 @@ static inline void ext4_update_inode_fsync_trans(handle_t *handle,
 {
        struct ext4_inode_info *ei = EXT4_I(inode);
 
-       if (ext4_handle_valid(handle)) {
+       if (ext4_handle_valid(handle) && !is_handle_aborted(handle)) {
                ei->i_sync_tid = handle->h_transaction->t_tid;
                if (datasync)
                        ei->i_datasync_tid = handle->h_transaction->t_tid;
index 69d65d49837bb65b10e47895f33ce6acc2ee696f..98ec11f69cd4d0d50abbaf14b6fd82224a10e6d0 100644 (file)
@@ -125,7 +125,7 @@ ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;
 
-       if (pos >= i_size_read(inode))
+       if (pos >= ALIGN(i_size_read(inode), sb->s_blocksize))
                return 0;
 
        if ((pos | iov_iter_alignment(from)) & blockmask)
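
A worked example for the check above: with a 4 KiB block size (blockmask 0xfff) and i_size == 2048, the old test treated a 512-byte AIO write at pos 3072 as aligned simply because pos >= i_size, even though it lands inside the partially written EOF block. With ALIGN(2048, 4096) == 4096, the same write now falls below the rounded-up size, is classified as unaligned, and gets serialized.
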
index c2225f0d31b511bbb0c62b176324e4f483437638..2024d3fa55044f734b961f3c8818ba3d243260a3 100644 (file)
@@ -1222,6 +1222,7 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
        ext4_lblk_t offsets[4], offsets2[4];
        Indirect chain[4], chain2[4];
        Indirect *partial, *partial2;
+       Indirect *p = NULL, *p2 = NULL;
        ext4_lblk_t max_block;
        __le32 nr = 0, nr2 = 0;
        int n = 0, n2 = 0;
@@ -1263,7 +1264,7 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
                }
 
 
-               partial = ext4_find_shared(inode, n, offsets, chain, &nr);
+               partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
                if (nr) {
                        if (partial == chain) {
                                /* Shared branch grows from the inode */
@@ -1288,13 +1289,11 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
                                partial->p + 1,
                                (__le32 *)partial->bh->b_data+addr_per_block,
                                (chain+n-1) - partial);
-                       BUFFER_TRACE(partial->bh, "call brelse");
-                       brelse(partial->bh);
                        partial--;
                }
 
 end_range:
-               partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+               partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
                if (nr2) {
                        if (partial2 == chain2) {
                                /*
@@ -1324,16 +1323,14 @@ end_range:
                                           (__le32 *)partial2->bh->b_data,
                                           partial2->p,
                                           (chain2+n2-1) - partial2);
-                       BUFFER_TRACE(partial2->bh, "call brelse");
-                       brelse(partial2->bh);
                        partial2--;
                }
                goto do_indirects;
        }
 
        /* Punch happened within the same level (n == n2) */
-       partial = ext4_find_shared(inode, n, offsets, chain, &nr);
-       partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+       partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
+       partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
 
        /* Free top, but only if partial2 isn't its subtree. */
        if (nr) {
@@ -1390,11 +1387,7 @@ end_range:
                                           partial->p + 1,
                                           partial2->p,
                                           (chain+n-1) - partial);
-                       BUFFER_TRACE(partial->bh, "call brelse");
-                       brelse(partial->bh);
-                       BUFFER_TRACE(partial2->bh, "call brelse");
-                       brelse(partial2->bh);
-                       return 0;
+                       goto cleanup;
                }
 
                /*
@@ -1409,8 +1402,6 @@ end_range:
                                           partial->p + 1,
                                           (__le32 *)partial->bh->b_data+addr_per_block,
                                           (chain+n-1) - partial);
-                       BUFFER_TRACE(partial->bh, "call brelse");
-                       brelse(partial->bh);
                        partial--;
                }
                if (partial2 > chain2 && depth2 <= depth) {
@@ -1418,11 +1409,21 @@ end_range:
                                           (__le32 *)partial2->bh->b_data,
                                           partial2->p,
                                           (chain2+n2-1) - partial2);
-                       BUFFER_TRACE(partial2->bh, "call brelse");
-                       brelse(partial2->bh);
                        partial2--;
                }
        }
+
+cleanup:
+       while (p && p > chain) {
+               BUFFER_TRACE(p->bh, "call brelse");
+               brelse(p->bh);
+               p--;
+       }
+       while (p2 && p2 > chain2) {
+               BUFFER_TRACE(p2->bh, "call brelse");
+               brelse(p2->bh);
+               p2--;
+       }
        return 0;
 
 do_indirects:
@@ -1430,7 +1431,7 @@ do_indirects:
        switch (offsets[0]) {
        default:
                if (++n >= n2)
-                       return 0;
+                       break;
                nr = i_data[EXT4_IND_BLOCK];
                if (nr) {
                        ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
@@ -1439,7 +1440,7 @@ do_indirects:
                /* fall through */
        case EXT4_IND_BLOCK:
                if (++n >= n2)
-                       return 0;
+                       break;
                nr = i_data[EXT4_DIND_BLOCK];
                if (nr) {
                        ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
@@ -1448,7 +1449,7 @@ do_indirects:
                /* fall through */
        case EXT4_DIND_BLOCK:
                if (++n >= n2)
-                       return 0;
+                       break;
                nr = i_data[EXT4_TIND_BLOCK];
                if (nr) {
                        ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
@@ -1458,5 +1459,5 @@ do_indirects:
        case EXT4_TIND_BLOCK:
                ;
        }
-       return 0;
+       goto cleanup;
 }
index b54b261ded36f92076d95197e6c456e5bd17698a..b32a57bc5d5d602ffcb8536ef28ae2684317ed7d 100644 (file)
@@ -6080,36 +6080,6 @@ out:
        return;
 }
 
-#if 0
-/*
- * Bind an inode's backing buffer_head into this transaction, to prevent
- * it from being flushed to disk early.  Unlike
- * ext4_reserve_inode_write, this leaves behind no bh reference and
- * returns no iloc structure, so the caller needs to repeat the iloc
- * lookup to mark the inode dirty later.
- */
-static int ext4_pin_inode(handle_t *handle, struct inode *inode)
-{
-       struct ext4_iloc iloc;
-
-       int err = 0;
-       if (handle) {
-               err = ext4_get_inode_loc(inode, &iloc);
-               if (!err) {
-                       BUFFER_TRACE(iloc.bh, "get_write_access");
-                       err = jbd2_journal_get_write_access(handle, iloc.bh);
-                       if (!err)
-                               err = ext4_handle_dirty_metadata(handle,
-                                                                NULL,
-                                                                iloc.bh);
-                       brelse(iloc.bh);
-               }
-       }
-       ext4_std_error(inode->i_sb, err);
-       return err;
-}
-#endif
-
 int ext4_change_inode_journal_flag(struct inode *inode, int val)
 {
        journal_t *journal;
index 3c4f8bb59f8abfd23ceaf36f93c7fceffac0134e..bab3da4f1e0d36692fa172b6724379baa47bb2d6 100644 (file)
@@ -1000,6 +1000,13 @@ resizefs_out:
                if (!blk_queue_discard(q))
                        return -EOPNOTSUPP;
 
+               /*
+                * We haven't replayed the journal, so we cannot use our
+                * block-bitmap-guided storage zapping commands.
+                */
+               if (test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb))
+                       return -EROFS;
+
                if (copy_from_user(&range, (struct fstrim_range __user *)arg,
                    sizeof(range)))
                        return -EFAULT;
index 3d9b18505c0c799b272553d0adf81fa26e6a6833..e7ae26e36c9c119a0b8b025e57914ba411b38ac5 100644 (file)
@@ -932,11 +932,18 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
        memcpy(n_group_desc, o_group_desc,
               EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
        n_group_desc[gdb_num] = gdb_bh;
+
+       BUFFER_TRACE(gdb_bh, "get_write_access");
+       err = ext4_journal_get_write_access(handle, gdb_bh);
+       if (err) {
+               kvfree(n_group_desc);
+               brelse(gdb_bh);
+               return err;
+       }
+
        EXT4_SB(sb)->s_group_desc = n_group_desc;
        EXT4_SB(sb)->s_gdb_count++;
        kvfree(o_group_desc);
-       BUFFER_TRACE(gdb_bh, "get_write_access");
-       err = ext4_journal_get_write_access(handle, gdb_bh);
        return err;
 }
 
@@ -2073,6 +2080,10 @@ out:
                free_flex_gd(flex_gd);
        if (resize_inode != NULL)
                iput(resize_inode);
-       ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", n_blocks_count);
+       if (err)
+               ext4_warning(sb, "error (%d) occurred during "
+                            "file system resize", err);
+       ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
+                ext4_blocks_count(es));
        return err;
 }
index f5b828bf1299f1998d7b8ac2696b979c8f303079..6ed4eb81e67437dc2dddbbce8ae9f7f2b59681d1 100644 (file)
@@ -430,6 +430,12 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
        spin_unlock(&sbi->s_md_lock);
 }
 
+static bool system_going_down(void)
+{
+       return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
+               || system_state == SYSTEM_RESTART;
+}
+
 /* Deal with the reporting of failure conditions on a filesystem such as
  * inconsistencies detected or read IO failures.
  *
@@ -460,7 +466,12 @@ static void ext4_handle_error(struct super_block *sb)
                if (journal)
                        jbd2_journal_abort(journal, -EIO);
        }
-       if (test_opt(sb, ERRORS_RO)) {
+       /*
+        * We force ERRORS_RO behavior when system is rebooting. Otherwise we
+        * could panic during 'reboot -f' as the underlying device got already
+        * disabled.
+        */
+       if (test_opt(sb, ERRORS_RO) || system_going_down()) {
                ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
                /*
                 * Make sure updated value of ->s_mount_flags will be visible
@@ -468,8 +479,7 @@ static void ext4_handle_error(struct super_block *sb)
                 */
                smp_wmb();
                sb->s_flags |= SB_RDONLY;
-       }
-       if (test_opt(sb, ERRORS_PANIC)) {
+       } else if (test_opt(sb, ERRORS_PANIC)) {
                if (EXT4_SB(sb)->s_journal &&
                  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
                        return;
index 842e8f749db64eb6ee17297e1039bb7e2c4ea2b7..570d71043acf982976d3098cf5e4beaee7532241 100644 (file)
@@ -410,7 +410,7 @@ bool fs_validate_description(const struct fs_parameter_description *desc)
                        for (param = desc->specs; param->name; param++) {
                                if (param->opt == e->opt &&
                                    param->type != fs_param_is_enum) {
-                                       pr_err("VALIDATE %s: e[%lu] enum val for %s\n",
+                                       pr_err("VALIDATE %s: e[%tu] enum val for %s\n",
                                               name, e - desc->enums, param->name);
                                        good = false;
                                }
index 8a63e52785e978a6792542d96c66cb202e5f1d1b..9971a35cf1ef66c960862ef550197814052b9267 100644 (file)
@@ -2056,10 +2056,8 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
                rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
 
        ret = -EINVAL;
-       if (rem < len) {
-               pipe_unlock(pipe);
-               goto out;
-       }
+       if (rem < len)
+               goto out_free;
 
        rem = len;
        while (rem) {
@@ -2077,7 +2075,9 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
                        pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
                        pipe->nrbufs--;
                } else {
-                       pipe_buf_get(pipe, ibuf);
+                       if (!pipe_buf_get(pipe, ibuf))
+                               goto out_free;
+
                        *obuf = *ibuf;
                        obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
                        obuf->len = rem;
@@ -2100,11 +2100,11 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
        ret = fuse_dev_do_write(fud, &cs, len);
 
        pipe_lock(pipe);
+out_free:
        for (idx = 0; idx < nbuf; idx++)
                pipe_buf_release(pipe, &bufs[idx]);
        pipe_unlock(pipe);
 
-out:
        kvfree(bufs);
        return ret;
 }
index ec32fece5e1e9d80e726b7e8202cf214673d7a5a..9285dd4f4b1ce3ed2601daa62cb30de6d0eed92b 100644 (file)
@@ -755,11 +755,17 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                                        umode_t mode, dev_t dev)
 {
        struct inode *inode;
-       struct resv_map *resv_map;
+       struct resv_map *resv_map = NULL;
 
-       resv_map = resv_map_alloc();
-       if (!resv_map)
-               return NULL;
+       /*
+        * Reserve maps are only needed for inodes that can have associated
+        * page allocations.
+        */
+       if (S_ISREG(mode) || S_ISLNK(mode)) {
+               resv_map = resv_map_alloc();
+               if (!resv_map)
+                       return NULL;
+       }
 
        inode = new_inode(sb);
        if (inode) {
@@ -794,8 +800,10 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                        break;
                }
                lockdep_annotate_inode_mutex_key(inode);
-       } else
-               kref_put(&resv_map->refs, resv_map_release);
+       } else {
+               if (resv_map)
+                       kref_put(&resv_map->refs, resv_map_release);
+       }
 
        return inode;
 }
index e9d97add2b36c9731a8d877e2fd32c7c2e1a382d..9a453f3637f85a377d72c94bd9e6f1e7dbbf2ad5 100644 (file)
@@ -1817,8 +1817,13 @@ int file_remove_privs(struct file *file)
        int kill;
        int error = 0;
 
-       /* Fast path for nothing security related */
-       if (IS_NOSEC(inode))
+       /*
+        * Fast path for nothing security related.
+        * The same applies to non-regular files, e.g. blkdev inodes.
+        * For example, blkdev_write_iter() might get here
+        * trying to remove privs which it is not allowed to.
+        */
+       if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
                return 0;
 
        kill = dentry_needs_remove_privs(dentry);
index c88088d92613595eecf6f1119a2c53f0b63bab27..84efb8956734fbbe8204e1c6fc285a469e9d38d5 100644 (file)
@@ -4,15 +4,28 @@
  * supporting fast/efficient IO.
  *
  * A note on the read/write ordering memory barriers that are matched between
- * the application and kernel side. When the application reads the CQ ring
- * tail, it must use an appropriate smp_rmb() to order with the smp_wmb()
- * the kernel uses after writing the tail. Failure to do so could cause a
- * delay in when the application notices that completion events available.
- * This isn't a fatal condition. Likewise, the application must use an
- * appropriate smp_wmb() both before writing the SQ tail, and after writing
- * the SQ tail. The first one orders the sqe writes with the tail write, and
- * the latter is paired with the smp_rmb() the kernel will issue before
- * reading the SQ tail on submission.
+ * the application and kernel side.
+ *
+ * After the application reads the CQ ring tail, it must use an
+ * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
+ * before writing the tail (using smp_load_acquire to read the tail will
+ * do). It also needs a smp_mb() before updating CQ head (ordering the
+ * entry load(s) with the head store), pairing with an implicit barrier
+ * through a control-dependency in io_get_cqring (smp_store_release to
+ * store head will do). Failure to do so could lead to reading invalid
+ * CQ entries.
+ *
+ * Likewise, the application must use an appropriate smp_wmb() before
+ * writing the SQ tail (ordering SQ entry stores with the tail store),
+ * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
+ * to store the tail will do). And it needs a barrier ordering the SQ
+ * head load before writing new SQ entries (smp_load_acquire to read
+ * head will do).
+ *
+ * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
+ * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
+ * updating the SQ tail; a full memory barrier smp_mb() is needed
+ * between.
  *
  * Also see the examples in the liburing library:
  *
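
A user-space consumer following the rewritten rules might reap completions like this (a minimal sketch using GCC __atomic builtins as stand-ins for the smp_* primitives named above; the pointers are assumed to come from the IORING_OFF_CQ_RING mmap and handle_cqe is a placeholder):

    #include <linux/io_uring.h>

    static void reap_cqes(unsigned *cq_head, unsigned *cq_tail,
                          unsigned cq_mask, struct io_uring_cqe *cqes,
                          void (*handle_cqe)(struct io_uring_cqe *))
    {
            unsigned head = *cq_head;       /* application-owned index */
            unsigned tail = __atomic_load_n(cq_tail, __ATOMIC_ACQUIRE);

            while (head != tail) {
                    /* entry loads are ordered by the acquire above */
                    handle_cqe(&cqes[head & cq_mask]);
                    head++;
            }
            /* release orders the entry loads before publishing the new head */
            __atomic_store_n(cq_head, head, __ATOMIC_RELEASE);
    }
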
@@ -70,20 +83,108 @@ struct io_uring {
        u32 tail ____cacheline_aligned_in_smp;
 };
 
+/*
+ * This data is shared with the application through the mmap at offset
+ * IORING_OFF_SQ_RING.
+ *
+ * The offsets to the member fields are published through struct
+ * io_sqring_offsets when calling io_uring_setup.
+ */
 struct io_sq_ring {
+       /*
+        * Head and tail offsets into the ring; the offsets need to be
+        * masked to get valid indices.
+        *
+        * The kernel controls head and the application controls tail.
+        */
        struct io_uring         r;
+       /*
+        * Bitmask to apply to head and tail offsets (constant, equals
+        * ring_entries - 1)
+        */
        u32                     ring_mask;
+       /* Ring size (constant, power of 2) */
        u32                     ring_entries;
+       /*
+        * Number of invalid entries dropped by the kernel due to
+        * invalid index stored in array
+        *
+        * Written by the kernel, shouldn't be modified by the
+        * application (i.e. get number of "new events" by comparing to
+        * cached value).
+        *
+        * After the application reads a new SQ head value, this counter
+        * includes all submissions that were dropped before reaching
+        * the new SQ head (and possibly more).
+        */
        u32                     dropped;
+       /*
+        * Runtime flags
+        *
+        * Written by the kernel, shouldn't be modified by the
+        * application.
+        *
+        * The application needs a full memory barrier before checking
+        * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
+        */
        u32                     flags;
+       /*
+        * Ring buffer of indices into array of io_uring_sqe, which is
+        * mmapped by the application using the IORING_OFF_SQES offset.
+        *
+        * This indirection could e.g. be used to assign fixed
+        * io_uring_sqe entries to operations and only submit them to
+        * the queue when needed.
+        *
+        * The kernel modifies neither the indices array nor the entries
+        * array.
+        */
        u32                     array[];
 };
 
+/*
+ * This data is shared with the application through the mmap at offset
+ * IORING_OFF_CQ_RING.
+ *
+ * The offsets to the member fields are published through struct
+ * io_cqring_offsets when calling io_uring_setup.
+ */
 struct io_cq_ring {
+       /*
+        * Head and tail offsets into the ring; the offsets need to be
+        * masked to get valid indices.
+        *
+        * The application controls head and the kernel tail.
+        */
        struct io_uring         r;
+       /*
+        * Bitmask to apply to head and tail offsets (constant, equals
+        * ring_entries - 1)
+        */
        u32                     ring_mask;
+       /* Ring size (constant, power of 2) */
        u32                     ring_entries;
+       /*
+        * Number of completion events lost because the queue was full;
+        * this should be avoided by the application by making sure
+        * there are not more requests pending than there is space in
+        * the completion queue.
+        *
+        * Written by the kernel, shouldn't be modified by the
+        * application (i.e. get number of "new events" by comparing to
+        * cached value).
+        *
+        * As completion events come in out of order this counter is not
+        * ordered with any other data.
+        */
        u32                     overflow;
+       /*
+        * Ring buffer of completion events.
+        *
+        * The kernel writes completion events fresh every time they are
+        * produced, so the application is allowed to modify pending
+        * entries.
+        */
        struct io_uring_cqe     cqes[];
 };
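
The submission side is the mirror image of the completion sketch earlier (same caveats; the pointers and the prepared sqe are placeholders for state obtained via the IORING_OFF_SQ_RING and IORING_OFF_SQES mmaps):

    static void queue_sqe(unsigned *sq_tail, unsigned sq_mask,
                          unsigned *sq_array, struct io_uring_sqe *sqes,
                          const struct io_uring_sqe *prepared)
    {
            unsigned tail = *sq_tail;       /* application-owned index */
            unsigned index = tail & sq_mask;

            sqes[index] = *prepared;        /* fill the entry ... */
            sq_array[index] = index;        /* ... and its indirection slot */

            /* release pairs with the kernel's acquire load of the SQ tail */
            __atomic_store_n(sq_tail, tail + 1, __ATOMIC_RELEASE);
    }
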
 
@@ -189,17 +290,28 @@ struct sqe_submit {
        bool                            needs_fixed_file;
 };
 
+/*
+ * First field must be the file pointer in all the
+ * iocb unions! See also 'struct kiocb' in <linux/fs.h>
+ */
 struct io_poll_iocb {
        struct file                     *file;
        struct wait_queue_head          *head;
        __poll_t                        events;
-       bool                            woken;
+       bool                            done;
        bool                            canceled;
        struct wait_queue_entry         wait;
 };
 
+/*
+ * NOTE! Each of the iocb union members has the file pointer
+ * as the first entry in their struct definition. So you can
+ * access the file pointer through any of the sub-structs,
+ * or directly as just 'ki_filp' in this struct.
+ */
 struct io_kiocb {
        union {
+               struct file             *file;
                struct kiocb            rw;
                struct io_poll_iocb     poll;
        };
@@ -210,10 +322,11 @@ struct io_kiocb {
        struct list_head        list;
        unsigned int            flags;
        refcount_t              refs;
-#define REQ_F_FORCE_NONBLOCK   1       /* inline submission attempt */
+#define REQ_F_NOWAIT           1       /* must not punt to workers */
 #define REQ_F_IOPOLL_COMPLETED 2       /* polled IO has completed */
 #define REQ_F_FIXED_FILE       4       /* ctx owns file */
 #define REQ_F_SEQ_PREV         8       /* sequential with previous */
+#define REQ_F_PREPPED          16      /* prep already done */
        u64                     user_data;
        u64                     error;
 
@@ -305,12 +418,6 @@ static void io_commit_cqring(struct io_ring_ctx *ctx)
                /* order cqe stores with ring update */
                smp_store_release(&ring->r.tail, ctx->cached_cq_tail);
 
-               /*
-                * Write sider barrier of tail update, app has read side. See
-                * comment at the top of this file.
-                */
-               smp_wmb();
-
                if (wq_has_sleeper(&ctx->cq_wait)) {
                        wake_up_interruptible(&ctx->cq_wait);
                        kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
@@ -324,9 +431,12 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
        unsigned tail;
 
        tail = ctx->cached_cq_tail;
-       /* See comment at the top of the file */
-       smp_rmb();
-       if (tail + 1 == READ_ONCE(ring->r.head))
+       /*
+        * writes to the cq entry need to come after reading head; the
+        * control dependency is enough as we're using WRITE_ONCE to
+        * fill the cq entry
+        */
+       if (tail - READ_ONCE(ring->r.head) == ring->ring_entries)
                return NULL;
 
        ctx->cached_cq_tail++;
@@ -355,20 +465,25 @@ static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
        }
 }
 
-static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 ki_user_data,
+static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+{
+       if (waitqueue_active(&ctx->wait))
+               wake_up(&ctx->wait);
+       if (waitqueue_active(&ctx->sqo_wait))
+               wake_up(&ctx->sqo_wait);
+}
+
+static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
                                long res, unsigned ev_flags)
 {
        unsigned long flags;
 
        spin_lock_irqsave(&ctx->completion_lock, flags);
-       io_cqring_fill_event(ctx, ki_user_data, res, ev_flags);
+       io_cqring_fill_event(ctx, user_data, res, ev_flags);
        io_commit_cqring(ctx);
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
-       if (waitqueue_active(&ctx->wait))
-               wake_up(&ctx->wait);
-       if (waitqueue_active(&ctx->sqo_wait))
-               wake_up(&ctx->sqo_wait);
+       io_cqring_ev_posted(ctx);
 }
 
 static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
@@ -382,13 +497,14 @@ static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
 static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
                                   struct io_submit_state *state)
 {
+       gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
        struct io_kiocb *req;
 
        if (!percpu_ref_tryget(&ctx->refs))
                return NULL;
 
        if (!state) {
-               req = kmem_cache_alloc(req_cachep, __GFP_NOWARN);
+               req = kmem_cache_alloc(req_cachep, gfp);
                if (unlikely(!req))
                        goto out;
        } else if (!state->free_reqs) {
@@ -396,10 +512,18 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
                int ret;
 
                sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
-               ret = kmem_cache_alloc_bulk(req_cachep, __GFP_NOWARN, sz,
-                                               state->reqs);
-               if (unlikely(ret <= 0))
-                       goto out;
+               ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
+
+               /*
+                * Bulk alloc is all-or-nothing. If we fail to get a batch,
+                * retry single alloc to be on the safe side.
+                */
+               if (unlikely(ret <= 0)) {
+                       state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
+                       if (!state->reqs[0])
+                               goto out;
+                       ret = 1;
+               }
                state->free_reqs = ret - 1;
                state->cur_req = 1;
                req = state->reqs[0];
@@ -411,7 +535,8 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
 
        req->ctx = ctx;
        req->flags = 0;
-       refcount_set(&req->refs, 0);
+       /* one is dropped after submission, the other at completion */
+       refcount_set(&req->refs, 2);
        return req;
 out:
        io_ring_drop_ctx_refs(ctx, 1);
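
The refcount_set(&req->refs, 2) above establishes a simple two-owner lifetime, paired with the io_put_req() helper added just below; schematically:

    req = io_get_req(ctx, state);   /* refs == 2: submitter + completion */
    ret = __io_submit_sqe(...);     /* issue; the completion path owns one ref */
    io_put_req(req);                /* the submitter drops its ref */
    /* io_complete_rw() -> io_put_req() drops the last ref and frees req */
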
@@ -429,10 +554,16 @@ static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
 
 static void io_free_req(struct io_kiocb *req)
 {
-       if (!refcount_read(&req->refs) || refcount_dec_and_test(&req->refs)) {
-               io_ring_drop_ctx_refs(req->ctx, 1);
-               kmem_cache_free(req_cachep, req);
-       }
+       if (req->file && !(req->flags & REQ_F_FIXED_FILE))
+               fput(req->file);
+       io_ring_drop_ctx_refs(req->ctx, 1);
+       kmem_cache_free(req_cachep, req);
+}
+
+static void io_put_req(struct io_kiocb *req)
+{
+       if (refcount_dec_and_test(&req->refs))
+               io_free_req(req);
 }
 
 /*
@@ -442,44 +573,34 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
                               struct list_head *done)
 {
        void *reqs[IO_IOPOLL_BATCH];
-       int file_count, to_free;
-       struct file *file = NULL;
        struct io_kiocb *req;
+       int to_free;
 
-       file_count = to_free = 0;
+       to_free = 0;
        while (!list_empty(done)) {
                req = list_first_entry(done, struct io_kiocb, list);
                list_del(&req->list);
 
                io_cqring_fill_event(ctx, req->user_data, req->error, 0);
-
-               reqs[to_free++] = req;
                (*nr_events)++;
 
-               /*
-                * Batched puts of the same file, to avoid dirtying the
-                * file usage count multiple times, if avoidable.
-                */
-               if (!(req->flags & REQ_F_FIXED_FILE)) {
-                       if (!file) {
-                               file = req->rw.ki_filp;
-                               file_count = 1;
-                       } else if (file == req->rw.ki_filp) {
-                               file_count++;
+               if (refcount_dec_and_test(&req->refs)) {
+                       /* If we're not using fixed files, we have to pair the
+                        * completion part with the file put. Use regular
+                        * completions for those, only batch free for fixed
+                        * file.
+                        */
+                       if (req->flags & REQ_F_FIXED_FILE) {
+                               reqs[to_free++] = req;
+                               if (to_free == ARRAY_SIZE(reqs))
+                                       io_free_req_many(ctx, reqs, &to_free);
                        } else {
-                               fput_many(file, file_count);
-                               file = req->rw.ki_filp;
-                               file_count = 1;
+                               io_free_req(req);
                        }
                }
-
-               if (to_free == ARRAY_SIZE(reqs))
-                       io_free_req_many(ctx, reqs, &to_free);
        }
-       io_commit_cqring(ctx);
 
-       if (file)
-               fput_many(file, file_count);
+       io_commit_cqring(ctx);
        io_free_req_many(ctx, reqs, &to_free);
 }
 
@@ -602,21 +723,14 @@ static void kiocb_end_write(struct kiocb *kiocb)
        }
 }
 
-static void io_fput(struct io_kiocb *req)
-{
-       if (!(req->flags & REQ_F_FIXED_FILE))
-               fput(req->rw.ki_filp);
-}
-
 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 {
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
 
        kiocb_end_write(kiocb);
 
-       io_fput(req);
        io_cqring_add_event(req->ctx, req->user_data, res, 0);
-       io_free_req(req);
+       io_put_req(req);
 }
 
 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
@@ -666,11 +780,9 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
                list_add_tail(&req->list, &ctx->poll_list);
 }
 
-static void io_file_put(struct io_submit_state *state, struct file *file)
+static void io_file_put(struct io_submit_state *state)
 {
-       if (!state) {
-               fput(file);
-       } else if (state->file) {
+       if (state->file) {
                int diff = state->has_refs - state->used_refs;
 
                if (diff)
@@ -695,7 +807,7 @@ static struct file *io_file_get(struct io_submit_state *state, int fd)
                        state->ios_left--;
                        return state->file;
                }
-               io_file_put(state, NULL);
+               io_file_put(state);
        }
        state->file = fget_many(fd, state->ios_left);
        if (!state->file)
@@ -726,36 +838,23 @@ static bool io_file_supports_async(struct file *file)
 }
 
 static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
-                     bool force_nonblock, struct io_submit_state *state)
+                     bool force_nonblock)
 {
        const struct io_uring_sqe *sqe = s->sqe;
        struct io_ring_ctx *ctx = req->ctx;
        struct kiocb *kiocb = &req->rw;
-       unsigned ioprio, flags;
-       int fd, ret;
+       unsigned ioprio;
+       int ret;
 
+       if (!req->file)
+               return -EBADF;
        /* For -EAGAIN retry, everything is already prepped */
-       if (kiocb->ki_filp)
+       if (req->flags & REQ_F_PREPPED)
                return 0;
 
-       flags = READ_ONCE(sqe->flags);
-       fd = READ_ONCE(sqe->fd);
+       if (force_nonblock && !io_file_supports_async(req->file))
+               force_nonblock = false;
 
-       if (flags & IOSQE_FIXED_FILE) {
-               if (unlikely(!ctx->user_files ||
-                   (unsigned) fd >= ctx->nr_user_files))
-                       return -EBADF;
-               kiocb->ki_filp = ctx->user_files[fd];
-               req->flags |= REQ_F_FIXED_FILE;
-       } else {
-               if (s->needs_fixed_file)
-                       return -EBADF;
-               kiocb->ki_filp = io_file_get(state, fd);
-               if (unlikely(!kiocb->ki_filp))
-                       return -EBADF;
-               if (force_nonblock && !io_file_supports_async(kiocb->ki_filp))
-                       force_nonblock = false;
-       }
        kiocb->ki_pos = READ_ONCE(sqe->off);
        kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
        kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
@@ -764,7 +863,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
        if (ioprio) {
                ret = ioprio_check_cap(ioprio);
                if (ret)
-                       goto out_fput;
+                       return ret;
 
                kiocb->ki_ioprio = ioprio;
        } else
@@ -772,38 +871,30 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
 
        ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
        if (unlikely(ret))
-               goto out_fput;
-       if (force_nonblock) {
+               return ret;
+
+       /* don't allow async punt if RWF_NOWAIT was requested */
+       if (kiocb->ki_flags & IOCB_NOWAIT)
+               req->flags |= REQ_F_NOWAIT;
+
+       if (force_nonblock)
                kiocb->ki_flags |= IOCB_NOWAIT;
-               req->flags |= REQ_F_FORCE_NONBLOCK;
-       }
+
        if (ctx->flags & IORING_SETUP_IOPOLL) {
-               ret = -EOPNOTSUPP;
                if (!(kiocb->ki_flags & IOCB_DIRECT) ||
                    !kiocb->ki_filp->f_op->iopoll)
-                       goto out_fput;
+                       return -EOPNOTSUPP;
 
                req->error = 0;
                kiocb->ki_flags |= IOCB_HIPRI;
                kiocb->ki_complete = io_complete_rw_iopoll;
        } else {
-               if (kiocb->ki_flags & IOCB_HIPRI) {
-                       ret = -EINVAL;
-                       goto out_fput;
-               }
+               if (kiocb->ki_flags & IOCB_HIPRI)
+                       return -EINVAL;
                kiocb->ki_complete = io_complete_rw;
        }
+       req->flags |= REQ_F_PREPPED;
        return 0;
-out_fput:
-       if (!(flags & IOSQE_FIXED_FILE)) {
-               /*
-                * in case of error, we didn't use this file reference. drop it.
-                */
-               if (state)
-                       state->used_refs--;
-               io_file_put(state, kiocb->ki_filp);
-       }
-       return ret;
 }
 
 static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
@@ -864,6 +955,9 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
        iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
        if (offset)
                iov_iter_advance(iter, offset);
+
+       /* don't drop a reference to these pages */
+       iter->type |= ITER_BVEC_FLAG_NO_REF;
        return 0;
 }
 
@@ -887,7 +981,7 @@ static int io_import_iovec(struct io_ring_ctx *ctx, int rw,
        opcode = READ_ONCE(sqe->opcode);
        if (opcode == IORING_OP_READ_FIXED ||
            opcode == IORING_OP_WRITE_FIXED) {
-               ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
+               int ret = io_import_fixed(ctx, rw, sqe, iter);
                *iovec = NULL;
                return ret;
        }
@@ -945,31 +1039,29 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
        async_list->io_end = io_end;
 }
 
-static ssize_t io_read(struct io_kiocb *req, const struct sqe_submit *s,
-                      bool force_nonblock, struct io_submit_state *state)
+static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
+                  bool force_nonblock)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
        struct kiocb *kiocb = &req->rw;
        struct iov_iter iter;
        struct file *file;
        size_t iov_count;
-       ssize_t ret;
+       int ret;
 
-       ret = io_prep_rw(req, s, force_nonblock, state);
+       ret = io_prep_rw(req, s, force_nonblock);
        if (ret)
                return ret;
        file = kiocb->ki_filp;
 
-       ret = -EBADF;
        if (unlikely(!(file->f_mode & FMODE_READ)))
-               goto out_fput;
-       ret = -EINVAL;
+               return -EBADF;
        if (unlikely(!file->f_op->read_iter))
-               goto out_fput;
+               return -EINVAL;
 
        ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
        if (ret)
-               goto out_fput;
+               return ret;
 
        iov_count = iov_iter_count(&iter);
        ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
@@ -991,38 +1083,32 @@ static ssize_t io_read(struct io_kiocb *req, const struct sqe_submit *s,
                }
        }
        kfree(iovec);
-out_fput:
-       /* Hold on to the file for -EAGAIN */
-       if (unlikely(ret && ret != -EAGAIN))
-               io_fput(req);
        return ret;
 }
 
-static ssize_t io_write(struct io_kiocb *req, const struct sqe_submit *s,
-                       bool force_nonblock, struct io_submit_state *state)
+static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
+                   bool force_nonblock)
 {
        struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
        struct kiocb *kiocb = &req->rw;
        struct iov_iter iter;
        struct file *file;
        size_t iov_count;
-       ssize_t ret;
+       int ret;
 
-       ret = io_prep_rw(req, s, force_nonblock, state);
+       ret = io_prep_rw(req, s, force_nonblock);
        if (ret)
                return ret;
 
-       ret = -EBADF;
        file = kiocb->ki_filp;
        if (unlikely(!(file->f_mode & FMODE_WRITE)))
-               goto out_fput;
-       ret = -EINVAL;
+               return -EBADF;
        if (unlikely(!file->f_op->write_iter))
-               goto out_fput;
+               return -EINVAL;
 
        ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
        if (ret)
-               goto out_fput;
+               return ret;
 
        iov_count = iov_iter_count(&iter);
 
@@ -1036,6 +1122,8 @@ static ssize_t io_write(struct io_kiocb *req, const struct sqe_submit *s,
 
        ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
        if (!ret) {
+               ssize_t ret2;
+
                /*
                 * Open-code file_start_write here to grab freeze protection,
                 * which will be released by another thread in
@@ -1050,14 +1138,22 @@ static ssize_t io_write(struct io_kiocb *req, const struct sqe_submit *s,
                                                SB_FREEZE_WRITE);
                }
                kiocb->ki_flags |= IOCB_WRITE;
-               io_rw_done(kiocb, call_write_iter(file, kiocb, &iter));
+
+               ret2 = call_write_iter(file, kiocb, &iter);
+               if (!force_nonblock || ret2 != -EAGAIN) {
+                       io_rw_done(kiocb, ret2);
+               } else {
+                       /*
+                        * If ->needs_lock is true, we're already in async
+                        * context.
+                        */
+                       if (!s->needs_lock)
+                               io_async_list_note(WRITE, req, iov_count);
+                       ret = -EAGAIN;
+               }
        }
 out_free:
        kfree(iovec);
-out_fput:
-       /* Hold on to the file for -EAGAIN */
-       if (unlikely(ret && ret != -EAGAIN))
-               io_fput(req);
        return ret;
 }
 
@@ -1072,29 +1168,19 @@ static int io_nop(struct io_kiocb *req, u64 user_data)
        if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
 
-       /*
-        * Twilight zone - it's possible that someone issued an opcode that
-        * has a file attached, then got -EAGAIN on submission, and changed
-        * the sqe before we retried it from async context. Avoid dropping
-        * a file reference for this malicious case, and flag the error.
-        */
-       if (req->rw.ki_filp) {
-               err = -EBADF;
-               io_fput(req);
-       }
        io_cqring_add_event(ctx, user_data, err, 0);
-       io_free_req(req);
+       io_put_req(req);
        return 0;
 }
 
 static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_ring_ctx *ctx = req->ctx;
-       unsigned flags;
-       int fd;
 
-       /* Prep already done */
-       if (req->rw.ki_filp)
+       if (!req->file)
+               return -EBADF;
+       /* Prep already done (EAGAIN retry) */
+       if (req->flags & REQ_F_PREPPED)
                return 0;
 
        if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
@@ -1102,20 +1188,7 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
                return -EINVAL;
 
-       fd = READ_ONCE(sqe->fd);
-       flags = READ_ONCE(sqe->flags);
-
-       if (flags & IOSQE_FIXED_FILE) {
-               if (unlikely(!ctx->user_files || fd >= ctx->nr_user_files))
-                       return -EBADF;
-               req->rw.ki_filp = ctx->user_files[fd];
-               req->flags |= REQ_F_FIXED_FILE;
-       } else {
-               req->rw.ki_filp = fget(fd);
-               if (unlikely(!req->rw.ki_filp))
-                       return -EBADF;
-       }
-
+       req->flags |= REQ_F_PREPPED;
        return 0;
 }
 
@@ -1144,9 +1217,8 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                                end > 0 ? end : LLONG_MAX,
                                fsync_flags & IORING_FSYNC_DATASYNC);
 
-       io_fput(req);
        io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
-       io_free_req(req);
+       io_put_req(req);
        return 0;
 }
 
@@ -1204,15 +1276,16 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        spin_unlock_irq(&ctx->completion_lock);
 
        io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
-       io_free_req(req);
+       io_put_req(req);
        return 0;
 }
 
-static void io_poll_complete(struct io_kiocb *req, __poll_t mask)
+static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
+                            __poll_t mask)
 {
-       io_cqring_add_event(req->ctx, req->user_data, mangle_poll(mask), 0);
-       io_fput(req);
-       io_free_req(req);
+       req->poll.done = true;
+       io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask), 0);
+       io_commit_cqring(ctx);
 }
 
 static void io_poll_complete_work(struct work_struct *work)
@@ -1240,9 +1313,11 @@ static void io_poll_complete_work(struct work_struct *work)
                return;
        }
        list_del_init(&req->list);
+       io_poll_complete(ctx, req, mask);
        spin_unlock_irq(&ctx->completion_lock);
 
-       io_poll_complete(req, mask);
+       io_cqring_ev_posted(ctx);
+       io_put_req(req);
 }
 
 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
@@ -1253,29 +1328,25 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
        struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
        struct io_ring_ctx *ctx = req->ctx;
        __poll_t mask = key_to_poll(key);
-
-       poll->woken = true;
+       unsigned long flags;
 
        /* for instances that support it check for an event match first: */
-       if (mask) {
-               unsigned long flags;
+       if (mask && !(mask & poll->events))
+               return 0;
 
-               if (!(mask & poll->events))
-                       return 0;
+       list_del_init(&poll->wait.entry);
 
-               /* try to complete the iocb inline if we can: */
-               if (spin_trylock_irqsave(&ctx->completion_lock, flags)) {
-                       list_del(&req->list);
-                       spin_unlock_irqrestore(&ctx->completion_lock, flags);
+       if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
+               list_del(&req->list);
+               io_poll_complete(ctx, req, mask);
+               spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
-                       list_del_init(&poll->wait.entry);
-                       io_poll_complete(req, mask);
-                       return 1;
-               }
+               io_cqring_ev_posted(ctx);
+               io_put_req(req);
+       } else {
+               queue_work(ctx->sqo_wq, &req->work);
        }
 
-       list_del_init(&poll->wait.entry);
-       queue_work(ctx->sqo_wq, &req->work);
        return 1;
 }
 
@@ -1305,36 +1376,23 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        struct io_poll_iocb *poll = &req->poll;
        struct io_ring_ctx *ctx = req->ctx;
        struct io_poll_table ipt;
-       unsigned flags;
+       bool cancel = false;
        __poll_t mask;
        u16 events;
-       int fd;
 
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
                return -EINVAL;
+       if (!poll->file)
+               return -EBADF;
 
        INIT_WORK(&req->work, io_poll_complete_work);
        events = READ_ONCE(sqe->poll_events);
        poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
 
-       flags = READ_ONCE(sqe->flags);
-       fd = READ_ONCE(sqe->fd);
-
-       if (flags & IOSQE_FIXED_FILE) {
-               if (unlikely(!ctx->user_files || fd >= ctx->nr_user_files))
-                       return -EBADF;
-               poll->file = ctx->user_files[fd];
-               req->flags |= REQ_F_FIXED_FILE;
-       } else {
-               poll->file = fget(fd);
-       }
-       if (unlikely(!poll->file))
-               return -EBADF;
-
        poll->head = NULL;
-       poll->woken = false;
+       poll->done = false;
        poll->canceled = false;
 
        ipt.pt._qproc = io_poll_queue_proc;
@@ -1346,56 +1404,43 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        INIT_LIST_HEAD(&poll->wait.entry);
        init_waitqueue_func_entry(&poll->wait, io_poll_wake);
 
-       /* one for removal from waitqueue, one for this function */
-       refcount_set(&req->refs, 2);
-
        mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
-       if (unlikely(!poll->head)) {
-               /* we did not manage to set up a waitqueue, done */
-               goto out;
-       }
 
        spin_lock_irq(&ctx->completion_lock);
-       spin_lock(&poll->head->lock);
-       if (poll->woken) {
-               /* wake_up context handles the rest */
-               mask = 0;
+       if (likely(poll->head)) {
+               spin_lock(&poll->head->lock);
+               if (unlikely(list_empty(&poll->wait.entry))) {
+                       if (ipt.error)
+                               cancel = true;
+                       ipt.error = 0;
+                       mask = 0;
+               }
+               if (mask || ipt.error)
+                       list_del_init(&poll->wait.entry);
+               else if (cancel)
+                       WRITE_ONCE(poll->canceled, true);
+               else if (!poll->done) /* actually waiting for an event */
+                       list_add_tail(&req->list, &ctx->cancel_list);
+               spin_unlock(&poll->head->lock);
+       }
+       if (mask) { /* no async, we'd stolen it */
+               req->error = mangle_poll(mask);
                ipt.error = 0;
-       } else if (mask || ipt.error) {
-               /* if we get an error or a mask we are done */
-               WARN_ON_ONCE(list_empty(&poll->wait.entry));
-               list_del_init(&poll->wait.entry);
-       } else {
-               /* actually waiting for an event */
-               list_add_tail(&req->list, &ctx->cancel_list);
+               io_poll_complete(ctx, req, mask);
        }
-       spin_unlock(&poll->head->lock);
        spin_unlock_irq(&ctx->completion_lock);
 
-out:
-       if (unlikely(ipt.error)) {
-               if (!(flags & IOSQE_FIXED_FILE))
-                       fput(poll->file);
-               /*
-                * Drop one of our refs to this req, __io_submit_sqe() will
-                * drop the other one since we're returning an error.
-                */
-               io_free_req(req);
-               return ipt.error;
+       if (mask) {
+               io_cqring_ev_posted(ctx);
+               io_put_req(req);
        }
-
-       if (mask)
-               io_poll_complete(req, mask);
-       io_free_req(req);
-       return 0;
+       return ipt.error;
 }
 
 static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-                          const struct sqe_submit *s, bool force_nonblock,
-                          struct io_submit_state *state)
+                          const struct sqe_submit *s, bool force_nonblock)
 {
-       ssize_t ret;
-       int opcode;
+       int ret, opcode;
 
        if (unlikely(s->index >= ctx->sq_entries))
                return -EINVAL;
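
The poll rework above splits completion in two: the CQE is filled and committed while completion_lock is held, and only after unlocking are waiters notified and the request reference dropped. A rough pthreads model of that shape; every name here is illustrative rather than the kernel's:

    #include <pthread.h>

    static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cq_wait = PTHREAD_COND_INITIALIZER;

    static void poll_complete_model(void (*fill_and_commit_cqe)(void),
                                    void (*put_req)(void))
    {
            pthread_mutex_lock(&completion_lock);
            fill_and_commit_cqe();            /* fill event + commit ring */
            pthread_mutex_unlock(&completion_lock);

            /* notification and the final put happen outside the lock */
            pthread_cond_broadcast(&cq_wait); /* stands in for ev_posted */
            put_req();                        /* drop completion reference */
    }
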
@@ -1409,18 +1454,18 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
        case IORING_OP_READV:
                if (unlikely(s->sqe->buf_index))
                        return -EINVAL;
-               ret = io_read(req, s, force_nonblock, state);
+               ret = io_read(req, s, force_nonblock);
                break;
        case IORING_OP_WRITEV:
                if (unlikely(s->sqe->buf_index))
                        return -EINVAL;
-               ret = io_write(req, s, force_nonblock, state);
+               ret = io_write(req, s, force_nonblock);
                break;
        case IORING_OP_READ_FIXED:
-               ret = io_read(req, s, force_nonblock, state);
+               ret = io_read(req, s, force_nonblock);
                break;
        case IORING_OP_WRITE_FIXED:
-               ret = io_write(req, s, force_nonblock, state);
+               ret = io_write(req, s, force_nonblock);
                break;
        case IORING_OP_FSYNC:
                ret = io_fsync(req, s->sqe, force_nonblock);
@@ -1493,8 +1538,7 @@ restart:
                struct sqe_submit *s = &req->submit;
                const struct io_uring_sqe *sqe = s->sqe;
 
-               /* Ensure we clear previously set forced non-block flag */
-               req->flags &= ~REQ_F_FORCE_NONBLOCK;
+               /* Ensure we clear previously set non-block flag */
                req->rw.ki_flags &= ~IOCB_NOWAIT;
 
                ret = 0;
@@ -1513,7 +1557,7 @@ restart:
                        s->has_user = cur_mm != NULL;
                        s->needs_lock = true;
                        do {
-                               ret = __io_submit_sqe(ctx, req, s, false, NULL);
+                               ret = __io_submit_sqe(ctx, req, s, false);
                                /*
                                 * We can get EAGAIN for polled IO even though
                                 * we're forcing a sync submission from here,
@@ -1525,9 +1569,13 @@ restart:
                                cond_resched();
                        } while (1);
                }
+
+               /* drop submission reference */
+               io_put_req(req);
+
                if (ret) {
                        io_cqring_add_event(ctx, sqe->user_data, ret, 0);
-                       io_free_req(req);
+                       io_put_req(req);
                }
 
                /* async context always use a copy of the sqe */
@@ -1614,11 +1662,55 @@ static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
        return ret;
 }
 
+static bool io_op_needs_file(const struct io_uring_sqe *sqe)
+{
+       int op = READ_ONCE(sqe->opcode);
+
+       switch (op) {
+       case IORING_OP_NOP:
+       case IORING_OP_POLL_REMOVE:
+               return false;
+       default:
+               return true;
+       }
+}
+
+static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
+                          struct io_submit_state *state, struct io_kiocb *req)
+{
+       unsigned flags;
+       int fd;
+
+       flags = READ_ONCE(s->sqe->flags);
+       fd = READ_ONCE(s->sqe->fd);
+
+       if (!io_op_needs_file(s->sqe)) {
+               req->file = NULL;
+               return 0;
+       }
+
+       if (flags & IOSQE_FIXED_FILE) {
+               if (unlikely(!ctx->user_files ||
+                   (unsigned) fd >= ctx->nr_user_files))
+                       return -EBADF;
+               req->file = ctx->user_files[fd];
+               req->flags |= REQ_F_FIXED_FILE;
+       } else {
+               if (s->needs_fixed_file)
+                       return -EBADF;
+               req->file = io_file_get(state, fd);
+               if (unlikely(!req->file))
+                       return -EBADF;
+       }
+
+       return 0;
+}
+
 static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
                         struct io_submit_state *state)
 {
        struct io_kiocb *req;
-       ssize_t ret;
+       int ret;
 
        /* enforce forwards compatibility on users */
        if (unlikely(s->sqe->flags & ~IOSQE_FIXED_FILE))
@@ -1628,10 +1720,12 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
        if (unlikely(!req))
                return -EAGAIN;
 
-       req->rw.ki_filp = NULL;
+       ret = io_req_set_file(ctx, s, state, req);
+       if (unlikely(ret))
+               goto out;
 
-       ret = __io_submit_sqe(ctx, req, s, true, state);
-       if (ret == -EAGAIN) {
+       ret = __io_submit_sqe(ctx, req, s, true);
+       if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
                struct io_uring_sqe *sqe_copy;
 
                sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
@@ -1649,11 +1743,23 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
                                INIT_WORK(&req->work, io_sq_wq_submit_work);
                                queue_work(ctx->sqo_wq, &req->work);
                        }
-                       ret = 0;
+
+                       /*
+                        * Queued up for async execution, worker will release
+                        * submit reference when the iocb is actually
+                        * submitted.
+                        */
+                       return 0;
                }
        }
+
+out:
+       /* drop submission reference */
+       io_put_req(req);
+
+       /* and drop final reference, if we failed */
        if (ret)
-               io_free_req(req);
+               io_put_req(req);
 
        return ret;
 }
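
The io_free_req() to io_put_req() conversions throughout this file implement a two-reference lifetime: each request starts with one reference for the submission path and one for completion, and is freed when the last one is dropped. A self-contained sketch of that rule, with C11 atomics standing in for the kernel's refcount machinery:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct request {
            atomic_int refs;
    };

    static struct request *req_alloc(void)
    {
            struct request *req = malloc(sizeof(*req));

            /* one reference for the submitter, one for the completion side */
            if (req)
                    atomic_init(&req->refs, 2);
            return req;
    }

    static void req_put(struct request *req)
    {
            /* whoever drops the last reference frees the request */
            if (atomic_fetch_sub(&req->refs, 1) == 1) {
                    printf("last ref gone, freeing\n");
                    free(req);
            }
    }

    int main(void)
    {
            struct request *req = req_alloc();

            if (!req)
                    return 1;
            req_put(req);   /* submission path is done with it */
            req_put(req);   /* completion posts the CQE, drops the final ref */
            return 0;
    }
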
@@ -1664,7 +1770,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
 static void io_submit_state_end(struct io_submit_state *state)
 {
        blk_finish_plug(&state->plug);
-       io_file_put(state, NULL);
+       io_file_put(state);
        if (state->free_reqs)
                kmem_cache_free_bulk(req_cachep, state->free_reqs,
                                        &state->reqs[state->cur_req]);
@@ -1693,23 +1799,9 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
                 * write new data to them.
                 */
                smp_store_release(&ring->r.head, ctx->cached_sq_head);
-
-               /*
-                * write side barrier of head update, app has read side. See
-                * comment at the top of this file
-                */
-               smp_wmb();
        }
 }
 
-/*
- * Undo last io_get_sqring()
- */
-static void io_drop_sqring(struct io_ring_ctx *ctx)
-{
-       ctx->cached_sq_head--;
-}
-
 /*
  * Fetch an sqe, if one is available. Note that s->sqe will point to memory
  * that is mapped by userspace. This means that care needs to be taken to
@@ -1732,9 +1824,8 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
         *    though the application is the one updating it.
         */
        head = ctx->cached_sq_head;
-       /* See comment at the top of this file */
-       smp_rmb();
-       if (head == READ_ONCE(ring->r.tail))
+       /* make sure SQ entry isn't read before tail */
+       if (head == smp_load_acquire(&ring->r.tail))
                return false;
 
        head = READ_ONCE(ring->array[head & ctx->sq_mask]);
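
Switching from a bare smp_rmb() to smp_load_acquire() on the tail gives the consumer exactly the pairing it needs: once the new tail value is observed, the SQE written before the producer's release store is guaranteed visible. The same idea in portable C11 atomics, on a toy 256-entry ring:

    #include <stdatomic.h>

    #define RING_ENTRIES 256

    struct ring {
            _Atomic unsigned int tail;
            unsigned int entries[RING_ENTRIES];
    };

    /* producer: write the entry, then publish it with a release store */
    static void produce(struct ring *r, unsigned int slot, unsigned int val)
    {
            r->entries[slot & (RING_ENTRIES - 1)] = val;
            atomic_store_explicit(&r->tail, slot + 1, memory_order_release);
    }

    /* consumer: the acquire load pairs with the release store above, so
     * the entry read below cannot observe pre-publication contents */
    static int consume(struct ring *r, unsigned int head, unsigned int *out)
    {
            if (head == atomic_load_explicit(&r->tail, memory_order_acquire))
                    return 0;       /* ring empty */
            *out = r->entries[head & (RING_ENTRIES - 1)];
            return 1;
    }
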
@@ -1748,8 +1839,6 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
        /* drop invalid entries */
        ctx->cached_sq_head++;
        ring->dropped++;
-       /* See comment at the top of this file */
-       smp_wmb();
        return false;
 }
 
@@ -1859,7 +1948,8 @@ static int io_sq_thread(void *data)
 
                        /* Tell userspace we may need a wakeup call */
                        ctx->sq_ring->flags |= IORING_SQ_NEED_WAKEUP;
-                       smp_wmb();
+                       /* make sure to read SQ tail after writing flags */
+                       smp_mb();
 
                        if (!io_get_sqring(ctx, &sqes[0])) {
                                if (kthread_should_stop()) {
@@ -1872,13 +1962,11 @@ static int io_sq_thread(void *data)
                                finish_wait(&ctx->sqo_wait, &wait);
 
                                ctx->sq_ring->flags &= ~IORING_SQ_NEED_WAKEUP;
-                               smp_wmb();
                                continue;
                        }
                        finish_wait(&ctx->sqo_wait, &wait);
 
                        ctx->sq_ring->flags &= ~IORING_SQ_NEED_WAKEUP;
-                       smp_wmb();
                }
 
                i = 0;
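
The smp_wmb() to smp_mb() upgrade is needed because this is a store-load sequence: set IORING_SQ_NEED_WAKEUP, then re-read the SQ tail. Userspace has the mirror obligation when it publishes a new tail and then checks the flag. A hedged sketch of that side, assuming headers new enough to define the io_uring syscall number; ring_fd and sq_flags are placeholders set up during ring creation:

    #include <linux/io_uring.h>
    #include <stdatomic.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int ring_fd;            /* from io_uring_setup(), set up elsewhere */
    static unsigned int *sq_flags; /* points into the mmap'ed SQ ring */

    static void kick_sq_thread(unsigned int to_submit)
    {
            /* store-load case, same as in the kernel: the new SQ tail must
             * be globally visible before the flags word is read */
            atomic_thread_fence(memory_order_seq_cst);

            if (*(volatile unsigned int *)sq_flags & IORING_SQ_NEED_WAKEUP)
                    syscall(__NR_io_uring_enter, ring_fd, to_submit, 0,
                            IORING_ENTER_SQ_WAKEUP, NULL, 0);
    }
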
@@ -1913,13 +2001,17 @@ static int io_sq_thread(void *data)
                unuse_mm(cur_mm);
                mmput(cur_mm);
        }
+
+       if (kthread_should_park())
+               kthread_parkme();
+
        return 0;
 }
 
 static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
 {
        struct io_submit_state state, *statep = NULL;
-       int i, ret = 0, submit = 0;
+       int i, submit = 0;
 
        if (to_submit > IO_PLUG_THRESHOLD) {
                io_submit_state_start(&state, ctx, to_submit);
@@ -1928,6 +2020,7 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
 
        for (i = 0; i < to_submit; i++) {
                struct sqe_submit s;
+               int ret;
 
                if (!io_get_sqring(ctx, &s))
                        break;
@@ -1935,21 +2028,18 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
                s.has_user = true;
                s.needs_lock = false;
                s.needs_fixed_file = false;
+               submit++;
 
                ret = io_submit_sqe(ctx, &s, statep);
-               if (ret) {
-                       io_drop_sqring(ctx);
-                       break;
-               }
-
-               submit++;
+               if (ret)
+                       io_cqring_add_event(ctx, s.sqe->user_data, ret, 0);
        }
        io_commit_sqring(ctx);
 
        if (statep)
                io_submit_state_end(statep);
 
-       return submit ? submit : ret;
+       return submit;
 }
 
 static unsigned io_cqring_events(struct io_cq_ring *ring)
@@ -1975,7 +2065,15 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                return 0;
 
        if (sig) {
-               ret = set_user_sigmask(sig, &ksigmask, &sigsaved, sigsz);
+#ifdef CONFIG_COMPAT
+               if (in_compat_syscall())
+                       ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
+                                                     &ksigmask, &sigsaved, sigsz);
+               else
+#endif
+                       ret = set_user_sigmask(sig, &ksigmask,
+                                              &sigsaved, sigsz);
+
                if (ret)
                        return ret;
        }
@@ -2039,6 +2137,7 @@ static void io_sq_thread_stop(struct io_ring_ctx *ctx)
        if (ctx->sqo_thread) {
                ctx->sqo_stop = 1;
                mb();
+               kthread_park(ctx->sqo_thread);
                kthread_stop(ctx->sqo_thread);
                ctx->sqo_thread = NULL;
        }
@@ -2200,6 +2299,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
                        fput(ctx->user_files[i]);
 
                kfree(ctx->user_files);
+               ctx->user_files = NULL;
                ctx->nr_user_files = 0;
                return ret;
        }
@@ -2220,19 +2320,23 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
        mmgrab(current->mm);
        ctx->sqo_mm = current->mm;
 
-       ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
-       if (!ctx->sq_thread_idle)
-               ctx->sq_thread_idle = HZ;
+       if (ctx->flags & IORING_SETUP_SQPOLL) {
+               ret = -EPERM;
+               if (!capable(CAP_SYS_ADMIN))
+                       goto err;
 
-       ret = -EINVAL;
-       if (!cpu_possible(p->sq_thread_cpu))
-               goto err;
+               ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
+               if (!ctx->sq_thread_idle)
+                       ctx->sq_thread_idle = HZ;
 
-       if (ctx->flags & IORING_SETUP_SQPOLL) {
                if (p->flags & IORING_SETUP_SQ_AFF) {
-                       int cpu;
+                       int cpu = array_index_nospec(p->sq_thread_cpu,
+                                                       nr_cpu_ids);
+
+                       ret = -EINVAL;
+                       if (!cpu_possible(cpu))
+                               goto err;
 
-                       cpu = array_index_nospec(p->sq_thread_cpu, NR_CPUS);
                        ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
                                                        ctx, cpu,
                                                        "io_uring-sq");
@@ -2293,8 +2397,12 @@ static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
 
 static void io_mem_free(void *ptr)
 {
-       struct page *page = virt_to_head_page(ptr);
+       struct page *page;
+
+       if (!ptr)
+               return;
 
+       page = virt_to_head_page(ptr);
        if (put_page_testzero(page))
                free_compound_page(page);
 }
@@ -2335,7 +2443,7 @@ static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
 
                if (ctx->account_mem)
                        io_unaccount_mem(ctx->user, imu->nr_bvecs);
-               kfree(imu->bvec);
+               kvfree(imu->bvec);
                imu->nr_bvecs = 0;
        }
 
@@ -2427,9 +2535,9 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
                if (!pages || nr_pages > got_pages) {
                        kfree(vmas);
                        kfree(pages);
-                       pages = kmalloc_array(nr_pages, sizeof(struct page *),
+                       pages = kvmalloc_array(nr_pages, sizeof(struct page *),
                                                GFP_KERNEL);
-                       vmas = kmalloc_array(nr_pages,
+                       vmas = kvmalloc_array(nr_pages,
                                        sizeof(struct vm_area_struct *),
                                        GFP_KERNEL);
                        if (!pages || !vmas) {
@@ -2441,7 +2549,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
                        got_pages = nr_pages;
                }
 
-               imu->bvec = kmalloc_array(nr_pages, sizeof(struct bio_vec),
+               imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
                                                GFP_KERNEL);
                ret = -ENOMEM;
                if (!imu->bvec) {
@@ -2480,6 +2588,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
                        }
                        if (ctx->account_mem)
                                io_unaccount_mem(ctx->user, nr_pages);
+                       kvfree(imu->bvec);
                        goto err;
                }
 
@@ -2502,12 +2611,12 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
 
                ctx->nr_user_bufs++;
        }
-       kfree(pages);
-       kfree(vmas);
+       kvfree(pages);
+       kvfree(vmas);
        return 0;
 err:
-       kfree(pages);
-       kfree(vmas);
+       kvfree(pages);
+       kvfree(vmas);
        io_sqe_buffer_unregister(ctx);
        return ret;
 }
@@ -2545,9 +2654,13 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
        __poll_t mask = 0;
 
        poll_wait(file, &ctx->cq_wait, wait);
-       /* See comment at the top of this file */
+       /*
+        * synchronizes with barrier from wq_has_sleeper call in
+        * io_commit_cqring
+        */
        smp_rmb();
-       if (READ_ONCE(ctx->sq_ring->r.tail) + 1 != ctx->cached_sq_head)
+       if (READ_ONCE(ctx->sq_ring->r.tail) - ctx->cached_sq_head !=
+           ctx->sq_ring->ring_entries)
                mask |= EPOLLOUT | EPOLLWRNORM;
        if (READ_ONCE(ctx->cq_ring->r.head) != ctx->cached_cq_tail)
                mask |= EPOLLIN | EPOLLRDNORM;
@@ -2658,24 +2771,12 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
                mutex_lock(&ctx->uring_lock);
                submitted = io_ring_submit(ctx, to_submit);
                mutex_unlock(&ctx->uring_lock);
-
-               if (submitted < 0)
-                       goto out_ctx;
        }
        if (flags & IORING_ENTER_GETEVENTS) {
                unsigned nr_events = 0;
 
                min_complete = min(min_complete, ctx->cq_entries);
 
-               /*
-                * The application could have included the 'to_submit' count
-                * in how many events it wanted to wait for. If we failed to
-                * submit the desired count, we may need to adjust the number
-                * of events to poll/wait for.
-                */
-               if (submitted < to_submit)
-                       min_complete = min_t(unsigned, submitted, min_complete);
-
                if (ctx->flags & IORING_SETUP_IOPOLL) {
                        mutex_lock(&ctx->uring_lock);
                        ret = io_iopoll_check(ctx, &nr_events, min_complete);
@@ -2721,17 +2822,12 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
                return -EOVERFLOW;
 
        ctx->sq_sqes = io_mem_alloc(size);
-       if (!ctx->sq_sqes) {
-               io_mem_free(ctx->sq_ring);
+       if (!ctx->sq_sqes)
                return -ENOMEM;
-       }
 
        cq_ring = io_mem_alloc(struct_size(cq_ring, cqes, p->cq_entries));
-       if (!cq_ring) {
-               io_mem_free(ctx->sq_ring);
-               io_mem_free(ctx->sq_sqes);
+       if (!cq_ring)
                return -ENOMEM;
-       }
 
        ctx->cq_ring = cq_ring;
        cq_ring->ring_mask = p->cq_entries - 1;
@@ -2902,11 +2998,31 @@ SYSCALL_DEFINE2(io_uring_setup, u32, entries,
 
 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
                               void __user *arg, unsigned nr_args)
+       __releases(ctx->uring_lock)
+       __acquires(ctx->uring_lock)
 {
        int ret;
 
+       /*
+        * We're inside the ring mutex, if the ref is already dying, then
+        * someone else killed the ctx or is already going through
+        * io_uring_register().
+        */
+       if (percpu_ref_is_dying(&ctx->refs))
+               return -ENXIO;
+
        percpu_ref_kill(&ctx->refs);
+
+       /*
+        * Drop uring mutex before waiting for references to exit. If another
+        * thread is currently inside io_uring_enter() it might need to grab
+        * the uring_lock to make progress. If we hold it here across the drain
+        * wait, then we can deadlock. It's safe to drop the mutex here, since
+        * no new references will come in after we've killed the percpu ref.
+        */
+       mutex_unlock(&ctx->uring_lock);
        wait_for_completion(&ctx->ctx_done);
+       mutex_lock(&ctx->uring_lock);
 
        switch (opcode) {
        case IORING_REGISTER_BUFFERS:
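
The deadlock described in the new comment is the usual hold-a-lock-while-waiting cycle: the register path would block on the ref drain while an io_uring_enter() caller blocks on uring_lock. The fix in miniature, with pthreads standing in for the mutex and the percpu-ref drain; purely illustrative:

    #include <pthread.h>

    static pthread_mutex_t uring_lock = PTHREAD_MUTEX_INITIALIZER;

    static void quiesce(void (*wait_for_refs_to_drain)(void))
    {
            /* the ref is already killed, so no new references can appear;
             * dropping the lock here lets io_uring_enter() callers finish
             * instead of deadlocking against us */
            pthread_mutex_unlock(&uring_lock);
            wait_for_refs_to_drain();
            pthread_mutex_lock(&uring_lock);
    }
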
index 97cb9d486a7da38527aa8bc7349692fedcce9e70..abdd18e404f8cdd4f85d4e014af1d4a5edf8bbd2 100644 (file)
@@ -1589,12 +1589,14 @@ static void iomap_dio_bio_end_io(struct bio *bio)
        if (should_dirty) {
                bio_check_pages_dirty(bio);
        } else {
-               struct bio_vec *bvec;
-               int i;
-               struct bvec_iter_all iter_all;
+               if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
+                       struct bvec_iter_all iter_all;
+                       struct bio_vec *bvec;
+                       int i;
 
-               bio_for_each_segment_all(bvec, bio, i, iter_all)
-                       put_page(bvec->bv_page);
+                       bio_for_each_segment_all(bvec, bio, i, iter_all)
+                               put_page(bvec->bv_page);
+               }
                bio_put(bio);
        }
 }
index 389ea53ea487538061ff3b6da78e27df2f894ac1..bccfc40b3a74ab002e45a07149afbe09634d8f64 100644 (file)
@@ -1414,11 +1414,6 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
 
        jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
 
-       if (f->target) {
-               kfree(f->target);
-               f->target = NULL;
-       }
-
        fds = f->dents;
        while(fds) {
                fd = fds;
index bb6ae387469f4d020424bfb13333e24d84e68123..05d892c79339f97276c81337c26929d8f53b7db2 100644 (file)
@@ -47,7 +47,10 @@ static struct inode *jffs2_alloc_inode(struct super_block *sb)
 static void jffs2_i_callback(struct rcu_head *head)
 {
        struct inode *inode = container_of(head, struct inode, i_rcu);
-       kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
+       struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+
+       kfree(f->target);
+       kmem_cache_free(jffs2_inode_cachep, f);
 }
 
 static void jffs2_destroy_inode(struct inode *inode)
index 93fb7cf0b92b631358cf36eab60d5947cc0312a7..f0b5c987d6ae14cc39a281668d5daf4d658cbe67 100644 (file)
@@ -290,12 +290,11 @@ void nlmclnt_release_host(struct nlm_host *host)
 
        WARN_ON_ONCE(host->h_server);
 
-       if (refcount_dec_and_test(&host->h_count)) {
+       if (refcount_dec_and_mutex_lock(&host->h_count, &nlm_host_mutex)) {
                WARN_ON_ONCE(!list_empty(&host->h_lockowners));
                WARN_ON_ONCE(!list_empty(&host->h_granted));
                WARN_ON_ONCE(!list_empty(&host->h_reclaim));
 
-               mutex_lock(&nlm_host_mutex);
                nlm_destroy_host_locked(host);
                mutex_unlock(&nlm_host_mutex);
        }
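
refcount_dec_and_mutex_lock() closes the window the old dec-then-lock sequence left open by making "drop a reference" and "take the mutex if that was the last one" a single race-free operation. A userspace model of its contract, not the kernel implementation:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static bool dec_and_mutex_lock(atomic_int *refs, pthread_mutex_t *lock)
    {
            int old = atomic_load(refs);

            /* fast path: drop the ref lock-free while it cannot reach 0 */
            while (old > 1) {
                    if (atomic_compare_exchange_weak(refs, &old, old - 1))
                            return false;
            }

            /* might be the last reference: decide under the lock */
            pthread_mutex_lock(lock);
            if (atomic_fetch_sub(refs, 1) != 1) {
                    pthread_mutex_unlock(lock);
                    return false;
            }
            return true;    /* caller holds the lock and must tear down */
    }
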
index eaa1cfaf73b08c8fda256ea57ab816dcc4985217..71d0c6c2aac5ccde4d69cdf7396fbdbdfb40c3e6 100644 (file)
@@ -1160,6 +1160,11 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
                         */
                        error = -EDEADLK;
                        spin_lock(&blocked_lock_lock);
+                       /*
+                        * Ensure that we don't find any locks blocked on this
+                        * request during deadlock detection.
+                        */
+                       __locks_wake_up_blocks(request);
                        if (likely(!posix_locks_deadlock(request, fl))) {
                                error = FILE_LOCK_DEFERRED;
                                __locks_insert_block(fl, request,
index fb1cf1a4bda2a105e60cb23d95dea4b3abc09f70..90d71fda65cecfb3958cc4391240e2a09bac783e 100644 (file)
@@ -453,7 +453,7 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
        case XPRT_TRANSPORT_RDMA:
                if (retrans == NFS_UNSPEC_RETRANS)
                        to->to_retries = NFS_DEF_TCP_RETRANS;
-               if (timeo == NFS_UNSPEC_TIMEO || to->to_retries == 0)
+               if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0)
                        to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10;
                if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
                        to->to_initval = NFS_MAX_TCP_TIMEOUT;
index f9264e1922a28b836367b145c215d9ceb8883843..6673d4ff5a2a846c01e2de3e909e167da30156cb 100644 (file)
@@ -1289,6 +1289,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
 static int ff_layout_read_done_cb(struct rpc_task *task,
                                struct nfs_pgio_header *hdr)
 {
+       int new_idx = hdr->pgio_mirror_idx;
        int err;
 
        trace_nfs4_pnfs_read(hdr, task->tk_status);
@@ -1307,7 +1308,7 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
        case -NFS4ERR_RESET_TO_PNFS:
                if (ff_layout_choose_best_ds_for_read(hdr->lseg,
                                        hdr->pgio_mirror_idx + 1,
-                                       &hdr->pgio_mirror_idx))
+                                       &new_idx))
                        goto out_layouterror;
                set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
                return task->tk_status;
@@ -1320,7 +1321,9 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
 
        return 0;
 out_layouterror:
+       ff_layout_read_record_layoutstats_done(task, hdr);
        ff_layout_send_layouterror(hdr->lseg);
+       hdr->pgio_mirror_idx = new_idx;
 out_eagain:
        rpc_restart_call_prepare(task);
        return -EAGAIN;
index ff6f85fb676b7c1094878b269d35f2f127ca5fb5..5196bfa7894d21c0eb1220f4d9bb9e51e2593afb 100644 (file)
@@ -329,9 +329,6 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
        };
        ssize_t err, err2;
 
-       if (!nfs_server_capable(file_inode(dst), NFS_CAP_COPY))
-               return -EOPNOTSUPP;
-
        src_lock = nfs_get_lock_context(nfs_file_open_context(src));
        if (IS_ERR(src_lock))
                return PTR_ERR(src_lock);
index 45b2322e092d2455b508a8fdc00f5bde0b73c4e9..00d17198ee12aa7f6177bd3c1c5830fa655d1033 100644 (file)
@@ -133,8 +133,10 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
                                    struct file *file_out, loff_t pos_out,
                                    size_t count, unsigned int flags)
 {
+       if (!nfs_server_capable(file_inode(file_out), NFS_CAP_COPY))
+               return -EOPNOTSUPP;
        if (file_inode(file_in) == file_inode(file_out))
-               return -EINVAL;
+               return -EOPNOTSUPP;
        return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
 }
 
index 4dbb0ee234324db3275de7c7a26fc3bcd040171a..741ff8c9c6ed3f7cda214ec0157eb6d9461ebdca 100644 (file)
@@ -2933,7 +2933,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
        }
 
 out:
-       nfs4_sequence_free_slot(&opendata->o_res.seq_res);
+       if (!opendata->cancelled)
+               nfs4_sequence_free_slot(&opendata->o_res.seq_res);
        return ret;
 }
 
@@ -6301,7 +6302,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
        p->arg.seqid = seqid;
        p->res.seqid = seqid;
        p->lsp = lsp;
-       refcount_inc(&lsp->ls_count);
        /* Ensure we don't close file until we're done freeing locks! */
        p->ctx = get_nfs_open_context(ctx);
        p->l_ctx = nfs_get_lock_context(ctx);
@@ -6526,7 +6526,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
        p->res.lock_seqid = p->arg.lock_seqid;
        p->lsp = lsp;
        p->server = server;
-       refcount_inc(&lsp->ls_count);
        p->ctx = get_nfs_open_context(ctx);
        locks_init_lock(&p->fl);
        locks_copy_lock(&p->fl, fl);
index cfcabc33e24d01136ba00c336f90497f657fb0a5..602446158bfb56e1fe62b74411d276431baef8c6 100644 (file)
@@ -2589,7 +2589,7 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
                        ARRAY_SIZE(nfs4_acl_bitmap), &hdr);
 
        rpc_prepare_reply_pages(req, args->acl_pages, 0,
-                               args->acl_len, replen);
+                               args->acl_len, replen + 1);
        encode_nops(&hdr);
 }
 
@@ -2811,7 +2811,7 @@ static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req,
        }
 
        rpc_prepare_reply_pages(req, (struct page **)&args->page, 0,
-                               PAGE_SIZE, replen);
+                               PAGE_SIZE, replen + 1);
        encode_nops(&hdr);
 }
 
index 23790c7b2289d21328db2a824eef5c6484e29089..c27ac96a95bd3535bc893493492fdc7681aba1fe 100644 (file)
@@ -2041,7 +2041,8 @@ static int nfs23_validate_mount_data(void *options,
                memcpy(sap, &data->addr, sizeof(data->addr));
                args->nfs_server.addrlen = sizeof(data->addr);
                args->nfs_server.port = ntohs(data->addr.sin_port);
-               if (!nfs_verify_server_address(sap))
+               if (sap->sa_family != AF_INET ||
+                   !nfs_verify_server_address(sap))
                        goto out_no_address;
 
                if (!(data->flags & NFS_MOUNT_TCP))
index 8f933e84cec18221f4645b769ea9d1914bc3e627..9bc32af4e2daff14817c4306833009c1d9ab92aa 100644 (file)
@@ -442,7 +442,9 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp)
        struct nfsd3_readdirargs *argp = rqstp->rq_argp;
        struct nfsd3_readdirres  *resp = rqstp->rq_resp;
        __be32          nfserr;
-       int             count;
+       int             count = 0;
+       struct page     **p;
+       caddr_t         page_addr = NULL;
 
        dprintk("nfsd: READDIR(3)  %s %d bytes at %d\n",
                                SVCFH_fmt(&argp->fh),
@@ -462,7 +464,18 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp)
        nfserr = nfsd_readdir(rqstp, &resp->fh, (loff_t*) &argp->cookie, 
                                        &resp->common, nfs3svc_encode_entry);
        memcpy(resp->verf, argp->verf, 8);
-       resp->count = resp->buffer - argp->buffer;
+       count = 0;
+       for (p = rqstp->rq_respages + 1; p < rqstp->rq_next_page; p++) {
+               page_addr = page_address(*p);
+
+               if (((caddr_t)resp->buffer >= page_addr) &&
+                   ((caddr_t)resp->buffer < page_addr + PAGE_SIZE)) {
+                       count += (caddr_t)resp->buffer - page_addr;
+                       break;
+               }
+               count += PAGE_SIZE;
+       }
+       resp->count = count >> 2;
        if (resp->offset) {
                loff_t offset = argp->cookie;
 
index 93fea246f676ebec32213bbf3a023ea395fc01a3..8d789124ed3c18d187eea569e350e6d40a43ad7a 100644 (file)
@@ -573,6 +573,7 @@ int
 nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p)
 {
        struct nfsd3_readdirargs *args = rqstp->rq_argp;
+       int len;
        u32 max_blocksize = svc_max_payload(rqstp);
 
        p = decode_fh(p, &args->fh);
@@ -582,8 +583,14 @@ nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p)
        args->verf   = p; p += 2;
        args->dircount = ~0;
        args->count  = ntohl(*p++);
-       args->count  = min_t(u32, args->count, max_blocksize);
-       args->buffer = page_address(*(rqstp->rq_next_page++));
+       len = args->count  = min_t(u32, args->count, max_blocksize);
+
+       while (len > 0) {
+               struct page *p = *(rqstp->rq_next_page++);
+               if (!args->buffer)
+                       args->buffer = page_address(p);
+               len -= PAGE_SIZE;
+       }
 
        return xdr_argsize_check(rqstp, p);
 }
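
Both readdir hunks follow from the same change: the dirent buffer may now span several reply pages, so the encoded length can no longer be a single pointer subtraction within one page. The counting rule, modelled standalone with a hardcoded PAGE_SIZE:

    #include <stddef.h>

    #define PAGE_SIZE 4096

    /* 'pages' are the reply pages in order; 'cursor' is where encoding
     * stopped. Full pages count as PAGE_SIZE; the page holding the cursor
     * contributes only the cursor's offset within it. */
    static size_t reply_bytes(char * const *pages, int npages,
                              const char *cursor)
    {
            size_t count = 0;
            int i;

            for (i = 0; i < npages; i++) {
                    if (cursor >= pages[i] && cursor < pages[i] + PAGE_SIZE)
                            return count + (size_t)(cursor - pages[i]);
                    count += PAGE_SIZE;
            }
            return count;
    }
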
index d219159b98afc54bda6d2efee824b41487db17c0..7caa3801ce72b70de75802f0a5c1b78b1087ebb5 100644 (file)
@@ -1010,8 +1010,9 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
        cb->cb_seq_status = 1;
        cb->cb_status = 0;
        if (minorversion) {
-               if (!nfsd41_cb_get_slot(clp, task))
+               if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task))
                        return;
+               cb->cb_holds_slot = true;
        }
        rpc_call_start(task);
 }
@@ -1038,6 +1039,9 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
                return true;
        }
 
+       if (!cb->cb_holds_slot)
+               goto need_restart;
+
        switch (cb->cb_seq_status) {
        case 0:
                /*
@@ -1076,6 +1080,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
                        cb->cb_seq_status);
        }
 
+       cb->cb_holds_slot = false;
        clear_bit(0, &clp->cl_cb_slot_busy);
        rpc_wake_up_next(&clp->cl_cb_waitq);
        dprintk("%s: freed slot, new seqid=%d\n", __func__,
@@ -1283,6 +1288,7 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
        cb->cb_seq_status = 1;
        cb->cb_status = 0;
        cb->cb_need_restart = false;
+       cb->cb_holds_slot = false;
 }
 
 void nfsd4_run_cb(struct nfsd4_callback *cb)
index 6a45fb00c5fcdccabdb142096270aa6035fe2f32..f056b1d3fecd6e1d0db44b56978c23cb93300ce8 100644 (file)
@@ -265,6 +265,7 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
 static void
 free_blocked_lock(struct nfsd4_blocked_lock *nbl)
 {
+       locks_delete_block(&nbl->nbl_lock);
        locks_release_private(&nbl->nbl_lock);
        kfree(nbl);
 }
@@ -293,11 +294,18 @@ remove_blocked_locks(struct nfs4_lockowner *lo)
                nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
                                        nbl_lru);
                list_del_init(&nbl->nbl_lru);
-               locks_delete_block(&nbl->nbl_lock);
                free_blocked_lock(nbl);
        }
 }
 
+static void
+nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
+{
+       struct nfsd4_blocked_lock       *nbl = container_of(cb,
+                                               struct nfsd4_blocked_lock, nbl_cb);
+       locks_delete_block(&nbl->nbl_lock);
+}
+
 static int
 nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
 {
@@ -325,6 +333,7 @@ nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
 }
 
 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
+       .prepare        = nfsd4_cb_notify_lock_prepare,
        .done           = nfsd4_cb_notify_lock_done,
        .release        = nfsd4_cb_notify_lock_release,
 };
@@ -4863,7 +4872,6 @@ nfs4_laundromat(struct nfsd_net *nn)
                nbl = list_first_entry(&reaplist,
                                        struct nfsd4_blocked_lock, nbl_lru);
                list_del_init(&nbl->nbl_lru);
-               locks_delete_block(&nbl->nbl_lock);
                free_blocked_lock(nbl);
        }
 out:
index 396c76755b03b7cf08237b0bcd1b8a3f5de2b17d..9d6cb246c6c55737967011a919023fc2dad9c861 100644 (file)
@@ -70,6 +70,7 @@ struct nfsd4_callback {
        int cb_seq_status;
        int cb_status;
        bool cb_need_restart;
+       bool cb_holds_slot;
 };
 
 struct nfsd4_callback_ops {
index 6b9c27548997162420250a1dfc810b4d293e3436..63c6bb1f8c4dac2ed4025f070ca349ab1bc8c973 100644 (file)
@@ -346,10 +346,16 @@ static __kernel_fsid_t fanotify_get_fsid(struct fsnotify_iter_info *iter_info)
        __kernel_fsid_t fsid = {};
 
        fsnotify_foreach_obj_type(type) {
+               struct fsnotify_mark_connector *conn;
+
                if (!fsnotify_iter_should_report_type(iter_info, type))
                        continue;
 
-               fsid = iter_info->marks[type]->connector->fsid;
+               conn = READ_ONCE(iter_info->marks[type]->connector);
+               /* Mark is just getting destroyed or created? */
+               if (!conn)
+                       continue;
+               fsid = conn->fsid;
                if (WARN_ON_ONCE(!fsid.val[0] && !fsid.val[1]))
                        continue;
                return fsid;
@@ -408,8 +414,12 @@ static int fanotify_handle_event(struct fsnotify_group *group,
                        return 0;
        }
 
-       if (FAN_GROUP_FLAG(group, FAN_REPORT_FID))
+       if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
                fsid = fanotify_get_fsid(iter_info);
+               /* Racing with mark destruction or creation? */
+               if (!fsid.val[0] && !fsid.val[1])
+                       return 0;
+       }
 
        event = fanotify_alloc_event(group, inode, mask, data, data_type,
                                     &fsid);
index 56992b32c6bbb63839bbc95a50cb83a57398d8bb..a90bb19dcfa287c34234b2954524d7047481fe40 100644 (file)
@@ -208,6 +208,7 @@ static int copy_fid_to_user(struct fanotify_event *event, char __user *buf)
 {
        struct fanotify_event_info_fid info = { };
        struct file_handle handle = { };
+       unsigned char bounce[FANOTIFY_INLINE_FH_LEN], *fh;
        size_t fh_len = event->fh_len;
        size_t len = fanotify_event_info_len(event);
 
@@ -233,7 +234,16 @@ static int copy_fid_to_user(struct fanotify_event *event, char __user *buf)
 
        buf += sizeof(handle);
        len -= sizeof(handle);
-       if (copy_to_user(buf, fanotify_event_fh(event), fh_len))
+       /*
+        * For an inline fh, copy through the stack to exclude the copy from
+        * usercopy hardening protections.
+        */
+       fh = fanotify_event_fh(event);
+       if (fh_len <= FANOTIFY_INLINE_FH_LEN) {
+               memcpy(bounce, fh, fh_len);
+               fh = bounce;
+       }
+       if (copy_to_user(buf, fh, fh_len))
                return -EFAULT;
 
        /* Pad with 0's */
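
The bounce buffer exists because usercopy hardening whitelists whole heap/slab regions, and copying a few bytes out of the middle of a structure can trip it; a stack-local array is always permitted. The shape of the trick, with a function pointer standing in for copy_to_user():

    #include <string.h>

    #define INLINE_FH_LEN 12        /* mirrors FANOTIFY_INLINE_FH_LEN */

    /* the source handed to copy_out is either a heap buffer or this local
     * bounce array, never a small region embedded inside another struct */
    static int copy_fh_out(char *ubuf, const unsigned char *fh, size_t fh_len,
                           int (*copy_out)(char *, const void *, size_t))
    {
            unsigned char bounce[INLINE_FH_LEN];

            if (fh_len <= sizeof(bounce)) {
                    memcpy(bounce, fh, fh_len);     /* hop through the stack */
                    fh = bounce;
            }
            return copy_out(ubuf, fh, fh_len);
    }
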
index e2901fbb9f76c97a7abe1607f5aa51f53a842966..7b53598c88046f4eab660817ae165620bd48a48b 100644 (file)
@@ -519,8 +519,10 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
        fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
        if (!fsn_mark)
                return -ENOENT;
-       else if (create)
-               return -EEXIST;
+       else if (create) {
+               ret = -EEXIST;
+               goto out;
+       }
 
        i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
 
@@ -548,6 +550,7 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
        /* return the wd */
        ret = i_mark->wd;
 
+out:
        /* match the get from fsnotify_find_mark() */
        fsnotify_put_mark(fsn_mark);
 
index d593d42695618f20f585e97ddb835305f7d72e00..22acb0a79b532eb7541e3f90a1b4753acd733518 100644 (file)
@@ -239,13 +239,13 @@ static void fsnotify_drop_object(unsigned int type, void *objp)
 
 void fsnotify_put_mark(struct fsnotify_mark *mark)
 {
-       struct fsnotify_mark_connector *conn;
+       struct fsnotify_mark_connector *conn = READ_ONCE(mark->connector);
        void *objp = NULL;
        unsigned int type = FSNOTIFY_OBJ_TYPE_DETACHED;
        bool free_conn = false;
 
        /* Catch marks that were actually never attached to object */
-       if (!mark->connector) {
+       if (!conn) {
                if (refcount_dec_and_test(&mark->refcnt))
                        fsnotify_final_mark_destroy(mark);
                return;
@@ -255,10 +255,9 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
         * We have to be careful so that traversals of obj_list under lock can
         * safely grab mark reference.
         */
-       if (!refcount_dec_and_lock(&mark->refcnt, &mark->connector->lock))
+       if (!refcount_dec_and_lock(&mark->refcnt, &conn->lock))
                return;
 
-       conn = mark->connector;
        hlist_del_init_rcu(&mark->obj_list);
        if (hlist_empty(&conn->list)) {
                objp = fsnotify_detach_connector_from_object(conn, &type);
@@ -266,7 +265,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
        } else {
                __fsnotify_recalc_mask(conn);
        }
-       mark->connector = NULL;
+       WRITE_ONCE(mark->connector, NULL);
        spin_unlock(&conn->lock);
 
        fsnotify_drop_object(type, objp);
@@ -620,7 +619,7 @@ restart:
        /* mark should be the last entry.  last is the current last entry */
        hlist_add_behind_rcu(&mark->obj_list, &last->obj_list);
 added:
-       mark->connector = conn;
+       WRITE_ONCE(mark->connector, conn);
 out_err:
        spin_unlock(&conn->lock);
        spin_unlock(&mark->lock);
@@ -808,6 +807,7 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
        refcount_set(&mark->refcnt, 1);
        fsnotify_get_group(group);
        mark->group = group;
+       WRITE_ONCE(mark->connector, NULL);
 }
 
 /*
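
The READ_ONCE/WRITE_ONCE conversions enforce a single-snapshot rule: mark->connector can be cleared concurrently, so it must be loaded once and every later use must go through that one snapshot, never a fresh read. The same discipline in C11 form, with the types reduced to stubs:

    #include <stdatomic.h>
    #include <stddef.h>

    struct connector { int fsid; };

    struct mark {
            _Atomic(struct connector *) connector;
    };

    static void use_mark(struct mark *m)
    {
            /* load the pointer exactly once; a concurrent store of NULL
             * cannot split the check below from the dereference */
            struct connector *conn =
                    atomic_load_explicit(&m->connector, memory_order_relaxed);

            if (!conn)
                    return;         /* mark being set up or torn down */
            (void)conn->fsid;       /* safe: conn is our private snapshot */
    }
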
index a35259eebc56739b59bf7ffb5029e647dc11ad0f..1dc9a08e8bdc7b7c96a5668072faddf75b0285c2 100644 (file)
@@ -4719,22 +4719,23 @@ out:
 
 /* Lock an inode and grab a bh pointing to the inode. */
 int ocfs2_reflink_inodes_lock(struct inode *s_inode,
-                             struct buffer_head **bh1,
+                             struct buffer_head **bh_s,
                              struct inode *t_inode,
-                             struct buffer_head **bh2)
+                             struct buffer_head **bh_t)
 {
-       struct inode *inode1;
-       struct inode *inode2;
+       struct inode *inode1 = s_inode;
+       struct inode *inode2 = t_inode;
        struct ocfs2_inode_info *oi1;
        struct ocfs2_inode_info *oi2;
+       struct buffer_head *bh1 = NULL;
+       struct buffer_head *bh2 = NULL;
        bool same_inode = (s_inode == t_inode);
+       bool need_swap = (inode1->i_ino > inode2->i_ino);
        int status;
 
        /* First grab the VFS and rw locks. */
        lock_two_nondirectories(s_inode, t_inode);
-       inode1 = s_inode;
-       inode2 = t_inode;
-       if (inode1->i_ino > inode2->i_ino)
+       if (need_swap)
                swap(inode1, inode2);
 
        status = ocfs2_rw_lock(inode1, 1);
@@ -4757,17 +4758,13 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
        trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
                                (unsigned long long)oi2->ip_blkno);
 
-       if (*bh1)
-               *bh1 = NULL;
-       if (*bh2)
-               *bh2 = NULL;
-
        /* We always want to lock the one with the lower lockid first. */
        if (oi1->ip_blkno > oi2->ip_blkno)
                mlog_errno(-ENOLCK);
 
        /* lock id1 */
-       status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_REFLINK_TARGET);
+       status = ocfs2_inode_lock_nested(inode1, &bh1, 1,
+                                        OI_LS_REFLINK_TARGET);
        if (status < 0) {
                if (status != -ENOENT)
                        mlog_errno(status);
@@ -4776,15 +4773,25 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
 
        /* lock id2 */
        if (!same_inode) {
-               status = ocfs2_inode_lock_nested(inode2, bh2, 1,
+               status = ocfs2_inode_lock_nested(inode2, &bh2, 1,
                                                 OI_LS_REFLINK_TARGET);
                if (status < 0) {
                        if (status != -ENOENT)
                                mlog_errno(status);
                        goto out_cl1;
                }
-       } else
-               *bh2 = *bh1;
+       } else {
+               bh2 = bh1;
+       }
+
+       /*
+        * If we swapped inode order above, we have to swap the buffer heads
+        * before passing them back to the caller.
+        */
+       if (need_swap)
+               swap(bh1, bh2);
+       *bh_s = bh1;
+       *bh_t = bh2;
 
        trace_ocfs2_double_lock_end(
                        (unsigned long long)oi1->ip_blkno,
@@ -4794,8 +4801,7 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
 
 out_cl1:
        ocfs2_inode_unlock(inode1, 1);
-       brelse(*bh1);
-       *bh1 = NULL;
+       brelse(bh1);
 out_rw2:
        ocfs2_rw_unlock(inode2, 1);
 out_i2:
index 0285ce7dbd515c8c7bfd9e63f0211cabfb818801..a00350018a4792e758d5749e6294902cf32f2174 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -733,6 +733,12 @@ static int do_dentry_open(struct file *f,
                return 0;
        }
 
+       /* Any file opened for execve()/uselib() has to be a regular file. */
+       if (unlikely(f->f_flags & FMODE_EXEC && !S_ISREG(inode->i_mode))) {
+               error = -EACCES;
+               goto cleanup_file;
+       }
+
        if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
                error = get_write_access(inode);
                if (unlikely(error))
@@ -1209,3 +1215,21 @@ int nonseekable_open(struct inode *inode, struct file *filp)
 }
 
 EXPORT_SYMBOL(nonseekable_open);
+
+/*
+ * stream_open is used by subsystems that want stream-like file descriptors.
+ * Such file descriptors are not seekable and don't have notion of position
+ * (file.f_pos is always 0). Contrary to file descriptors of other regular
+ * files, .read() and .write() can run simultaneously.
+ *
+ * stream_open never fails and is marked to return int so that it can be
+ * used directly as file_operations.open.
+ */
+int stream_open(struct inode *inode, struct file *filp)
+{
+       filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE | FMODE_ATOMIC_POS);
+       filp->f_mode |= FMODE_STREAM;
+       return 0;
+}
+
+EXPORT_SYMBOL(stream_open);
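
For context, the new helper slots straight into file_operations: a
hypothetical character-device driver (demo_* names are illustrative) would
use it as its .open method, after which read()/write() never see or update
f_pos:

    #include <linux/fs.h>
    #include <linux/module.h>

    static ssize_t demo_read(struct file *f, char __user *buf,
                             size_t len, loff_t *ppos)
    {
            /* *ppos is always 0 on a FMODE_STREAM file */
            return 0;
    }

    static const struct file_operations demo_fops = {
            .owner = THIS_MODULE,
            .open  = stream_open,   /* never fails; int return matches .open */
            .read  = demo_read,
    };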
index 070aad543382a4e30aa0bd5eef94b5dabba7219d..41065901106b09d4365ebc13ee6cfa7b6465339b 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -188,9 +188,9 @@ EXPORT_SYMBOL(generic_pipe_buf_steal);
  *     in the tee() system call, when we duplicate the buffers in one
  *     pipe into another.
  */
-void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
+bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
 {
-       get_page(buf->page);
+       return try_get_page(buf->page);
 }
 EXPORT_SYMBOL(generic_pipe_buf_get);
 
index ddef482f133406737e09e5df4966aea9b6ec06aa..f179568b4c767aa1f13d5d8170016030775c5cc2 100644 (file)
@@ -407,7 +407,6 @@ static void unlock_trace(struct task_struct *task)
 static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
                          struct pid *pid, struct task_struct *task)
 {
-       struct stack_trace trace;
        unsigned long *entries;
        int err;
 
@@ -430,20 +429,17 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
        if (!entries)
                return -ENOMEM;
 
-       trace.nr_entries        = 0;
-       trace.max_entries       = MAX_STACK_TRACE_DEPTH;
-       trace.entries           = entries;
-       trace.skip              = 0;
-
        err = lock_trace(task);
        if (!err) {
-               unsigned int i;
+               unsigned int i, nr_entries;
 
-               save_stack_trace_tsk(task, &trace);
+               nr_entries = stack_trace_save_tsk(task, entries,
+                                                 MAX_STACK_TRACE_DEPTH, 0);
 
-               for (i = 0; i < trace.nr_entries; i++) {
+               for (i = 0; i < nr_entries; i++) {
                        seq_printf(m, "[<0>] %pB\n", (void *)entries[i]);
                }
+
                unlock_trace(task);
        }
        kfree(entries);
@@ -489,10 +485,9 @@ static int lstats_show_proc(struct seq_file *m, void *v)
                                   lr->count, lr->time, lr->max);
                        for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
                                unsigned long bt = lr->backtrace[q];
+
                                if (!bt)
                                        break;
-                               if (bt == ULONG_MAX)
-                                       break;
                                seq_printf(m, " %ps", (void *)bt);
                        }
                        seq_putc(m, '\n');
@@ -616,24 +611,25 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
                            struct pid *pid, struct task_struct *task)
 {
-       long nr;
-       unsigned long args[6], sp, pc;
+       struct syscall_info info;
+       u64 *args = &info.data.args[0];
        int res;
 
        res = lock_trace(task);
        if (res)
                return res;
 
-       if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
+       if (task_current_syscall(task, &info))
                seq_puts(m, "running\n");
-       else if (nr < 0)
-               seq_printf(m, "%ld 0x%lx 0x%lx\n", nr, sp, pc);
+       else if (info.data.nr < 0)
+               seq_printf(m, "%d 0x%llx 0x%llx\n",
+                          info.data.nr, info.sp, info.data.instruction_pointer);
        else
                seq_printf(m,
-                      "%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
-                      nr,
+                      "%d 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
+                      info.data.nr,
                       args[0], args[1], args[2], args[3], args[4], args[5],
-                      sp, pc);
+                      info.sp, info.data.instruction_pointer);
        unlock_trace(task);
 
        return 0;
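
The conversion above moves to the flat stack-trace API: the caller owns a
plain unsigned long buffer and gets the entry count back, with no struct
stack_trace to fill in. A hedged sketch of a caller (demo_* is illustrative):

    #include <linux/stacktrace.h>
    #include <linux/printk.h>

    static void demo_dump_task_stack(struct task_struct *task)
    {
            unsigned long entries[32];
            unsigned int i, nr;

            /* returns the number of entries written; last arg skips frames */
            nr = stack_trace_save_tsk(task, entries, ARRAY_SIZE(entries), 0);
            for (i = 0; i < nr; i++)
                    pr_info("[<0>] %pB\n", (void *)entries[i]);
    }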
index bbcc185062bb5c8198f7cefbc85520c5adeac0e2..f5834488b67d564d91b5534c210fc7fcd8e95969 100644 (file)
@@ -54,6 +54,28 @@ static LIST_HEAD(kclist_head);
 static DECLARE_RWSEM(kclist_lock);
 static int kcore_need_update = 1;
 
+/*
+ * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
+ * Same as oldmem_pfn_is_ram in vmcore
+ */
+static int (*mem_pfn_is_ram)(unsigned long pfn);
+
+int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
+{
+       if (mem_pfn_is_ram)
+               return -EBUSY;
+       mem_pfn_is_ram = fn;
+       return 0;
+}
+
+static int pfn_is_ram(unsigned long pfn)
+{
+       if (mem_pfn_is_ram)
+               return mem_pfn_is_ram(pfn);
+       else
+               return 1;
+}
+
 /* This doesn't grab kclist_lock, so it should only be used at init time. */
 void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
                       int type)
@@ -465,6 +487,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
                                goto out;
                        }
                        m = NULL;       /* skip the list anchor */
+               } else if (!pfn_is_ram(__pa(start) >> PAGE_SHIFT)) {
+                       if (clear_user(buffer, tsz)) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
                } else if (m->type == KCORE_VMALLOC) {
                        vread(buf, (char *)start, tsz);
                        /* we have to zero-fill user buffer even if no read */
@@ -588,7 +615,7 @@ static void __init proc_kcore_text_init(void)
 /*
  * MODULES_VADDR has no intersection with VMALLOC_ADDR.
  */
-struct kcore_list kcore_modules;
+static struct kcore_list kcore_modules;
 static void __init add_modules_range(void)
 {
        if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
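
The new hook lets a platform veto reads of non-RAM pfns through /proc/kcore;
such ranges are zero-filled rather than read. A sketch of registering a
filter at init time, with a made-up pfn boundary:

    /* > 0 = RAM, 0 = not RAM, < 0 = error; the boundary here is fictional */
    static int demo_pfn_is_ram(unsigned long pfn)
    {
            return pfn < 0x100000;
    }

    static int __init demo_kcore_filter_init(void)
    {
            /* returns -EBUSY if another filter is already registered */
            return register_mem_pfn_is_ram(demo_pfn_is_ram);
    }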
index 4d598a399bbff1b32becb1cf24406f9b9e80287c..7325baa8f9d474f166c1bbef54b584a028b287fb 100644 (file)
@@ -1626,8 +1626,11 @@ static void drop_sysctl_table(struct ctl_table_header *header)
        if (--header->nreg)
                return;
 
-       put_links(header);
-       start_unregistering(header);
+       if (parent) {
+               put_links(header);
+               start_unregistering(header);
+       }
+
        if (!--header->count)
                kfree_rcu(header, rcu);
 
index 92a91e7816d8472c3451a99a456f6f5b7b84c5b5..95ca1fe7283cff265247c6f3a84e5fa573299fca 100644 (file)
@@ -1143,6 +1143,24 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                                        count = -EINTR;
                                        goto out_mm;
                                }
+                               /*
+                                * Avoid modifying vma->vm_flags
+                                * without locked ops while the
+                                * coredump reads the vm_flags.
+                                */
+                               if (!mmget_still_valid(mm)) {
+                                       /*
+                                        * Silently return "count" as
+                                        * if get_task_mm() had failed.
+                                        * FIXME: should this function
+                                        * return -ESRCH when
+                                        * get_task_mm() fails, as it
+                                        * does when get_proc_task()
+                                        * fails?
+                                        */
+                                       up_write(&mm->mmap_sem);
+                                       goto out_mm;
+                               }
                                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                        vma->vm_flags &= ~VM_SOFTDIRTY;
                                        vma_set_page_prot(vma);
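
The mmget_still_valid() guard above, shown in isolation: after taking
mmap_sem for writing, bail out without touching vm_flags if the mm is being
core-dumped. A sketch only; the real call sites are in this hunk and in the
userfaultfd changes below:

    #include <linux/sched/mm.h>

    static void demo_update_vm_flags(struct mm_struct *mm)
    {
            down_write(&mm->mmap_sem);
            if (!mmget_still_valid(mm)) {
                    /* core dump in flight: leave vma->vm_flags untouched */
                    up_write(&mm->mmap_sem);
                    return;
            }
            /* ... walk mm->mmap and modify vma->vm_flags here ... */
            up_write(&mm->mmap_sem);
    }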
index 177ccc3d405a33b425998e400b105418d8f537a4..61b43ad7608e301336662d7cab1cf6a7bda8067c 100644 (file)
@@ -560,12 +560,13 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
 
 static inline loff_t file_pos_read(struct file *file)
 {
-       return file->f_pos;
+       return file->f_mode & FMODE_STREAM ? 0 : file->f_pos;
 }
 
 static inline void file_pos_write(struct file *file, loff_t pos)
 {
-       file->f_pos = pos;
+       if ((file->f_mode & FMODE_STREAM) == 0)
+               file->f_pos = pos;
 }
 
 ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count)
index 3ee7e82df48f2b14d09ddd3b4877879e90f8c418..25212dcca2dfd6b43dc51cd8887f93038c753515 100644 (file)
@@ -330,8 +330,8 @@ const struct pipe_buf_operations default_pipe_buf_ops = {
        .get = generic_pipe_buf_get,
 };
 
-static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
-                                   struct pipe_buffer *buf)
+int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
+                            struct pipe_buffer *buf)
 {
        return 1;
 }
@@ -1593,7 +1593,11 @@ retry:
                         * Get a reference to this pipe buffer,
                         * so we can copy the contents over.
                         */
-                       pipe_buf_get(ipipe, ibuf);
+                       if (!pipe_buf_get(ipipe, ibuf)) {
+                               if (ret == 0)
+                                       ret = -EFAULT;
+                               break;
+                       }
                        *obuf = *ibuf;
 
                        /*
@@ -1667,7 +1671,11 @@ static int link_pipe(struct pipe_inode_info *ipipe,
                 * Get a reference to this pipe buffer,
                 * so we can copy the contents over.
                 */
-               pipe_buf_get(ipipe, ibuf);
+               if (!pipe_buf_get(ipipe, ibuf)) {
+                       if (ret == 0)
+                               ret = -EFAULT;
+                       break;
+               }
 
                obuf = opipe->bufs + nbuf;
                *obuf = *ibuf;
index 583a0124bc394d2919b2fbc346948c5ab9684201..2739f57515f81d8fad4bc4edc83d4c98773308d9 100644 (file)
@@ -1467,11 +1467,6 @@ int vfs_get_tree(struct fs_context *fc)
        struct super_block *sb;
        int error;
 
-       if (fc->fs_type->fs_flags & FS_REQUIRES_DEV && !fc->source) {
-               errorf(fc, "Filesystem requires source device");
-               return -ENOENT;
-       }
-
        if (fc->root)
                return -EBUSY;
 
index 8dc2818fdd84990b74e07a8d479de994b8e5be9b..12628184772c04b27c975568101a4b5cddf442f8 100644 (file)
@@ -276,14 +276,12 @@ static void ubifs_i_callback(struct rcu_head *head)
 {
        struct inode *inode = container_of(head, struct inode, i_rcu);
        struct ubifs_inode *ui = ubifs_inode(inode);
+       kfree(ui->data);
        kmem_cache_free(ubifs_inode_slab, ui);
 }
 
 static void ubifs_destroy_inode(struct inode *inode)
 {
-       struct ubifs_inode *ui = ubifs_inode(inode);
-
-       kfree(ui->data);
        call_rcu(&inode->i_rcu, ubifs_i_callback);
 }
 
index ae796e10f68b2524423dae6d3840c7a9d05cf4c3..e7276932e433c9cdc1bdd10f9a35a3d0ef09ece7 100644 (file)
@@ -1242,8 +1242,10 @@ set_size:
                truncate_setsize(inode, newsize);
                down_write(&iinfo->i_data_sem);
                udf_clear_extent_cache(inode);
-               udf_truncate_extents(inode);
+               err = udf_truncate_extents(inode);
                up_write(&iinfo->i_data_sem);
+               if (err)
+                       return err;
        }
 update_time:
        inode->i_mtime = inode->i_ctime = current_time(inode);
index b647f0bd150c46ba6b202989052b1849bb60c4c5..63a47f1e1d529c1c98a1f25c86cd25880c1737ac 100644 (file)
@@ -199,7 +199,7 @@ static void udf_update_alloc_ext_desc(struct inode *inode,
  * for making file shorter. For making file longer, udf_extend_file() has to
  * be used.
  */
-void udf_truncate_extents(struct inode *inode)
+int udf_truncate_extents(struct inode *inode)
 {
        struct extent_position epos;
        struct kernel_lb_addr eloc, neloc = {};
@@ -224,7 +224,7 @@ void udf_truncate_extents(struct inode *inode)
        if (etype == -1) {
                /* We should extend the file? */
                WARN_ON(byte_offset);
-               return;
+               return 0;
        }
        epos.offset -= adsize;
        extent_trunc(inode, &epos, &eloc, etype, elen, byte_offset);
@@ -260,6 +260,9 @@ void udf_truncate_extents(struct inode *inode)
                        epos.block = eloc;
                        epos.bh = udf_tread(sb,
                                        udf_get_lb_pblock(sb, &eloc, 0));
+                       /* Error reading indirect block? */
+                       if (!epos.bh)
+                               return -EIO;
                        if (elen)
                                indirect_ext_len =
                                        (elen + sb->s_blocksize - 1) >>
@@ -283,4 +286,5 @@ void udf_truncate_extents(struct inode *inode)
        iinfo->i_lenExtents = inode->i_size;
 
        brelse(epos.bh);
+       return 0;
 }
index ee246769dee4a2c54f31b56c5237f77f7db3a040..d89ef71887fcfa1aed13bacc77a1c42ee3a55c1a 100644 (file)
@@ -235,7 +235,7 @@ extern struct inode *udf_new_inode(struct inode *, umode_t);
 /* truncate.c */
 extern void udf_truncate_tail_extent(struct inode *);
 extern void udf_discard_prealloc(struct inode *);
-extern void udf_truncate_extents(struct inode *);
+extern int udf_truncate_extents(struct inode *);
 
 /* balloc.c */
 extern void udf_free_blocks(struct super_block *, struct inode *,
index 1fd3011ea6236b6f0bcb04bd52051c0ffd930cda..7fd4802222b8c88e579c7845c94be6ccff07b5a0 100644 (file)
@@ -229,7 +229,7 @@ ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode)
        case UFS_UID_44BSD:
                return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_gid);
        case UFS_UID_EFT:
-               if (inode->ui_u1.oldids.ui_suid == 0xFFFF)
+               if (inode->ui_u1.oldids.ui_sgid == 0xFFFF)
                        return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid);
                /* Fall through */
        default:
index 89800fc7dc9d562cd3557988adc766fa41c51209..f5de1e726356a51c27ff529f98d99032650eb839 100644 (file)
@@ -629,6 +629,8 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
 
                /* the various vma->vm_userfaultfd_ctx still points to it */
                down_write(&mm->mmap_sem);
+               /* no task can run (and in turn coredump) yet */
+               VM_WARN_ON(!mmget_still_valid(mm));
                for (vma = mm->mmap; vma; vma = vma->vm_next)
                        if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
                                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
@@ -883,6 +885,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
         * taking the mmap_sem for writing.
         */
        down_write(&mm->mmap_sem);
+       if (!mmget_still_valid(mm))
+               goto skip_mm;
        prev = NULL;
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                cond_resched();
@@ -905,6 +909,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
                vma->vm_flags = new_flags;
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
        }
+skip_mm:
        up_write(&mm->mmap_sem);
        mmput(mm);
 wakeup:
@@ -1333,6 +1338,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
                goto out;
 
        down_write(&mm->mmap_sem);
+       if (!mmget_still_valid(mm))
+               goto out_unlock;
        vma = find_vma_prev(mm, start, &prev);
        if (!vma)
                goto out_unlock;
@@ -1520,6 +1527,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
                goto out;
 
        down_write(&mm->mmap_sem);
+       if (!mmget_still_valid(mm))
+               goto out_unlock;
        vma = find_vma_prev(mm, start, &prev);
        if (!vma)
                goto out_unlock;
index 48502cb9990f184a55b780372adaef3bda406509..4637ae1ae91ca8ef6007c05ba060dd9fb208fdf1 100644 (file)
@@ -1191,7 +1191,10 @@ xfs_iread_extents(
         * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
         */
        level = be16_to_cpu(block->bb_level);
-       ASSERT(level > 0);
+       if (unlikely(level == 0)) {
+               XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+               return -EFSCORRUPTED;
+       }
        pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
        bno = be64_to_cpu(*pp);
 
@@ -4249,9 +4252,13 @@ xfs_bmapi_write(
        struct xfs_bmbt_irec    *mval,          /* output: map values */
        int                     *nmap)          /* i/o: mval size/count */
 {
+       struct xfs_bmalloca     bma = {
+               .tp             = tp,
+               .ip             = ip,
+               .total          = total,
+       };
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_ifork        *ifp;
-       struct xfs_bmalloca     bma = { NULL }; /* args for xfs_bmap_alloc */
        xfs_fileoff_t           end;            /* end of mapped file region */
        bool                    eof = false;    /* after the end of extents */
        int                     error;          /* error return */
@@ -4319,10 +4326,6 @@ xfs_bmapi_write(
                eof = true;
        if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
                bma.prev.br_startoff = NULLFILEOFF;
-       bma.tp = tp;
-       bma.ip = ip;
-       bma.total = total;
-       bma.datatype = 0;
        bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
 
        n = 0;
index 6f94d1f7322d0a33bd00134c8136ac3831b4fe2c..117910db51b809ebeea0196182e05f0dd0c54611 100644 (file)
@@ -415,8 +415,17 @@ xchk_btree_check_owner(
        struct xfs_btree_cur    *cur = bs->cur;
        struct check_owner      *co;
 
-       if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && bp == NULL)
+       /*
+        * In theory, xfs_btree_get_block should only give us a null buffer
+        * pointer for the root of a root-in-inode btree type, but we need
+        * to check defensively here in case the cursor state is also screwed
+        * up.
+        */
+       if (bp == NULL) {
+               if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE))
+                       xchk_btree_set_corrupt(bs->sc, bs->cur, level);
                return 0;
+       }
 
        /*
         * We want to cross-reference each btree block with the bnobt
index f1260b4bfdeed62440cc238138e7fd405c2dccf1..90527b094878971f831c78daafe2483dd99e83d2 100644 (file)
@@ -574,6 +574,11 @@ xchk_da_btree(
                /* Drill another level deeper. */
                blkno = be32_to_cpu(key->before);
                level++;
+               if (level >= XFS_DA_NODE_MAXDEPTH) {
+                       /* Too deep! */
+                       xchk_da_set_corrupt(&ds, level - 1);
+                       break;
+               }
                ds.tree_level--;
                error = xchk_da_btree_block(&ds, level, blkno);
                if (error)
index 93f07edafd8183a14ca55fae7ffdfad0370ccc89..9ee2a7d02e7059f29c103da1088b7c854ca023bb 100644 (file)
@@ -161,6 +161,14 @@ xfs_ioc_trim(
                return -EPERM;
        if (!blk_queue_discard(q))
                return -EOPNOTSUPP;
+
+       /*
+        * We haven't recovered the log, so we cannot use our bnobt-guided
+        * storage zapping commands.
+        */
+       if (mp->m_flags & XFS_MOUNT_NORECOVERY)
+               return -EROFS;
+
        if (copy_from_user(&range, urange, sizeof(range)))
                return -EFAULT;
 
index 1f2e2845eb76c2c78a932c913057e1028cec2f05..a7ceae90110eded646f13acf4314131574d46a69 100644 (file)
@@ -529,18 +529,17 @@ xfs_file_dio_aio_write(
        count = iov_iter_count(from);
 
        /*
-        * If we are doing unaligned IO, wait for all other IO to drain,
-        * otherwise demote the lock if we had to take the exclusive lock
-        * for other reasons in xfs_file_aio_write_checks.
+        * If we are doing unaligned IO, we can't allow any other overlapping IO
+        * in-flight at the same time or we risk data corruption. Wait for all
+        * other IO to drain before we submit. If the IO is aligned, demote the
+        * iolock if we had to take the exclusive lock in
+        * xfs_file_aio_write_checks() for other reasons.
         */
        if (unaligned_io) {
-               /* If we are going to wait for other DIO to finish, bail */
-               if (iocb->ki_flags & IOCB_NOWAIT) {
-                       if (atomic_read(&inode->i_dio_count))
-                               return -EAGAIN;
-               } else {
-                       inode_dio_wait(inode);
-               }
+               /* unaligned dio always waits, bail */
+               if (iocb->ki_flags & IOCB_NOWAIT)
+                       return -EAGAIN;
+               inode_dio_wait(inode);
        } else if (iolock == XFS_IOLOCK_EXCL) {
                xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
                iolock = XFS_IOLOCK_SHARED;
@@ -548,6 +547,14 @@ xfs_file_dio_aio_write(
 
        trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
        ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
+
+       /*
+        * If unaligned, this is the only IO in-flight. If it has not yet
+        * completed, wait on it before we release the iolock to prevent
+        * subsequent overlapping IO.
+        */
+       if (ret == -EIOCBQUEUED && unaligned_io)
+               inode_dio_wait(inode);
 out:
        xfs_iunlock(ip, iolock);
 
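Reduced to its essentials, the serialization above has this shape;
submit_dio() is a hypothetical stand-in for iomap_dio_rw():

    static ssize_t demo_dio_write(struct kiocb *iocb, struct iov_iter *from,
                                  struct inode *inode, bool unaligned_io)
    {
            ssize_t ret;

            if (unaligned_io) {
                    if (iocb->ki_flags & IOCB_NOWAIT)
                            return -EAGAIN;     /* unaligned dio always waits */
                    inode_dio_wait(inode);      /* drain all other dio first */
            }

            ret = submit_dio(iocb, from);       /* hypothetical helper */

            /* async unaligned dio: wait so later IO cannot overlap it */
            if (ret == -EIOCBQUEUED && unaligned_io)
                    inode_dio_wait(inode);
            return ret;
    }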
index 30b1ae53689fcffcb6a7a0b20437745c6cebd43c..c50542dc71e0b24912571eaa09039a98d3a6ef9f 100644 (file)
 
 /* Defaults for debug_level, debug and normal */
 
+#ifndef ACPI_DEBUG_DEFAULT
 #define ACPI_DEBUG_DEFAULT          (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_EVALUATION | ACPI_LV_REPAIR)
+#endif
+
 #define ACPI_NORMAL_DEFAULT         (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR)
 #define ACPI_DEBUG_ALL              (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
 
index 9ff328fd946a2b8ef332b6d13bdfc07ce403f5e4..624b90b340852c65104d8de894fb7d9a5a555f53 100644 (file)
 #define ACPI_NO_ERROR_MESSAGES
 #undef ACPI_DEBUG_OUTPUT
 
+/* Use a specific debugging default separate from ACPICA */
+
+#undef ACPI_DEBUG_DEFAULT
+#define ACPI_DEBUG_DEFAULT          (ACPI_LV_INFO | ACPI_LV_REPAIR)
+
 /* External interface for __KERNEL__, stub is needed */
 
 #define ACPI_EXTERNAL_RETURN_STATUS(prototype) \
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
deleted file mode 100644 (file)
index 93e67a0..0000000
+++ /dev/null
@@ -1,140 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_RWSEM_H
-#define _ASM_GENERIC_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
-#endif
-
-#ifdef __KERNEL__
-
-/*
- * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
- * Adapted largely from include/asm-i386/rwsem.h
- * by Paul Mackerras <paulus@samba.org>.
- */
-
-/*
- * the semaphore definition
- */
-#ifdef CONFIG_64BIT
-# define RWSEM_ACTIVE_MASK             0xffffffffL
-#else
-# define RWSEM_ACTIVE_MASK             0x0000ffffL
-#endif
-
-#define RWSEM_UNLOCKED_VALUE           0x00000000L
-#define RWSEM_ACTIVE_BIAS              0x00000001L
-#define RWSEM_WAITING_BIAS             (-RWSEM_ACTIVE_MASK-1)
-#define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-       if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
-               rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_killable(struct rw_semaphore *sem)
-{
-       if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
-               if (IS_ERR(rwsem_down_read_failed_killable(sem)))
-                       return -EINTR;
-       }
-
-       return 0;
-}
-
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
-       long tmp;
-
-       while ((tmp = atomic_long_read(&sem->count)) >= 0) {
-               if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
-                                  tmp + RWSEM_ACTIVE_READ_BIAS)) {
-                       return 1;
-               }
-       }
-       return 0;
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
-       long tmp;
-
-       tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
-                                            &sem->count);
-       if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
-               rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
-{
-       long tmp;
-
-       tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
-                                            &sem->count);
-       if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
-               if (IS_ERR(rwsem_down_write_failed_killable(sem)))
-                       return -EINTR;
-       return 0;
-}
-
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
-       long tmp;
-
-       tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
-                     RWSEM_ACTIVE_WRITE_BIAS);
-       return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-       long tmp;
-
-       tmp = atomic_long_dec_return_release(&sem->count);
-       if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
-               rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-       if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
-                                                   &sem->count) < 0))
-               rwsem_wake(sem);
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-       long tmp;
-
-       /*
-        * When downgrading from exclusive to shared ownership,
-        * anything inside the write-locked region cannot leak
-        * into the read side. In contrast, anything in the
-        * read-locked region is ok to be re-ordered into the
-        * write side. As such, rely on RELEASE semantics.
-        */
-       tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
-       if (tmp < 0)
-               rwsem_downgrade_wake(sem);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_GENERIC_RWSEM_H */
index 0c938a4354f6f58f67e0bb3555dca24460afadb7..b88239e9efe49979a5c94a3c411f8f36ba09e541 100644 (file)
@@ -105,41 +105,30 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
  * syscall_get_arguments - extract system call parameter values
  * @task:      task of interest, must be blocked
  * @regs:      task_pt_regs() of @task
- * @i:         argument index [0,5]
- * @n:         number of arguments; n+i must be [1,6].
  * @args:      array filled with argument values
  *
- * Fetches @n arguments to the system call starting with the @i'th argument
- * (from 0 through 5).  Argument @i is stored in @args[0], and so on.
- * An arch inline version is probably optimal when @i and @n are constants.
+ * Fetches 6 arguments to the system call.  The first argument is stored in
+ * @args[0], and so on.
  *
  * It's only valid to call this when @task is stopped for tracing on
  * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- * It's invalid to call this with @i + @n > 6; we only support system calls
- * taking up to 6 arguments.
  */
 void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-                          unsigned int i, unsigned int n, unsigned long *args);
+                          unsigned long *args);
 
 /**
  * syscall_set_arguments - change system call parameter value
  * @task:      task of interest, must be in system call entry tracing
  * @regs:      task_pt_regs() of @task
- * @i:         argument index [0,5]
- * @n:         number of arguments; n+i must be [1,6].
  * @args:      array of argument values to store
  *
- * Changes @n arguments to the system call starting with the @i'th argument.
- * Argument @i gets value @args[0], and so on.
- * An arch inline version is probably optimal when @i and @n are constants.
+ * Changes 6 arguments to the system call.
+ * The first argument gets value @args[0], and so on.
  *
  * It's only valid to call this when @task is stopped for tracing on
  * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- * It's invalid to call this with @i + @n > 6; we only support system calls
- * taking up to 6 arguments.
  */
 void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
-                          unsigned int i, unsigned int n,
                           const unsigned long *args);
 
 /**
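
With the simplified prototypes, callers always pass a six-slot array and the
old @i/@n bookkeeping disappears. A hedged sketch of a tracer-side caller
(demo_* is illustrative):

    #include <asm/syscall.h>
    #include <linux/printk.h>

    static void demo_show_syscall_args(struct task_struct *task,
                                       struct pt_regs *regs)
    {
            unsigned long args[6];

            syscall_get_arguments(task, regs, args);
            pr_info("syscall args: %lx %lx %lx %lx %lx %lx\n",
                    args[0], args[1], args[2], args[3], args[4], args[5]);
    }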
index 6be86c1c5c583c811b91d3926d3ca049978d3b6f..b9edc7608d9019db11ae45088bbb9cbcd75ca168 100644 (file)
 #include <linux/swap.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
 #ifdef CONFIG_MMU
 
+/*
+ * Generic MMU-gather implementation.
+ *
+ * The mmu_gather data structure is used by the mm code to implement the
+ * correct and efficient ordering of freeing pages and TLB invalidations.
+ *
+ * This correct ordering is:
+ *
+ *  1) unhook page
+ *  2) TLB invalidate page
+ *  3) free page
+ *
+ * That is, we must never free a page before we have ensured there are no live
+ * translations left to it. Otherwise it might be possible to observe (or
+ * worse, change) the page content after it has been reused.
+ *
+ * The mmu_gather API consists of:
+ *
+ *  - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
+ *
+ *    Finish in particular will issue a (final) TLB invalidate and free
+ *    all (remaining) queued pages.
+ *
+ *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
+ *
+ *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
+ *    there are large holes between the VMAs.
+ *
+ *  - tlb_remove_page() / __tlb_remove_page()
+ *  - tlb_remove_page_size() / __tlb_remove_page_size()
+ *
+ *    __tlb_remove_page_size() is the basic primitive that queues a page for
+ *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
+ *    boolean indicating if the queue is (now) full and a call to
+ *    tlb_flush_mmu() is required.
+ *
+ *    tlb_remove_page() and tlb_remove_page_size() imply the call to
+ *    tlb_flush_mmu() when required and have no return value.
+ *
+ *  - tlb_change_page_size()
+ *
+ *    call before __tlb_remove_page*() to set the current page-size; implies a
+ *    possible tlb_flush_mmu() call.
+ *
+ *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
+ *
+ *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
+ *                              related state, like the range)
+ *
+ *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
+ *                     whatever pages are still batched.
+ *
+ *  - mmu_gather::fullmm
+ *
+ *    A flag set by tlb_gather_mmu() to indicate we're going to free
+ *    the entire mm; this allows a number of optimizations.
+ *
+ *    - We can ignore tlb_{start,end}_vma(); because we don't
+ *      care about ranges. Everything will be shot down.
+ *
+ *    - (RISC) architectures that use ASIDs can cycle to a new ASID
+ *      and delay the invalidation until ASID space runs out.
+ *
+ *  - mmu_gather::need_flush_all
+ *
+ *    A flag that can be set by the arch code if it wants to force
+ *    flush the entire TLB irrespective of the range. For instance
+ *    x86-PAE needs this when changing top-level entries.
+ *
+ * The API also allows the architecture to provide its own tlb_flush():
+ *
+ * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
+ * use of:
+ *
+ *  - mmu_gather::start / mmu_gather::end
+ *
+ *    which provides the range that needs to be flushed to cover the pages to
+ *    be freed.
+ *
+ *  - mmu_gather::freed_tables
+ *
+ *    set when we freed page table pages
+ *
+ *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
+ *
+ *    returns the smallest TLB entry size unmapped in this range.
+ *
+ * If an architecture does not provide tlb_flush() a default implementation
+ * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
+ * specified, in which case we'll default to flush_tlb_mm().
+ *
+ * Additionally there are a few opt-in features:
+ *
+ *  HAVE_MMU_GATHER_PAGE_SIZE
+ *
+ *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
+ *  changes the size and provides mmu_gather::page_size to tlb_flush().
+ *
+ *  HAVE_RCU_TABLE_FREE
+ *
+ *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
+ *  for page directories (__p*_free_tlb()). This provides separate freeing of
+ *  the page-table pages themselves in a semi-RCU fashion (see comment below).
+ *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
+ *  and therefore doesn't naturally serialize with software page-table walkers.
+ *
+ *  When used, an architecture is expected to provide __tlb_remove_table()
+ *  which does the actual freeing of these pages.
+ *
+ *  HAVE_RCU_TABLE_NO_INVALIDATE
+ *
+ *  This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before
+ *  freeing the page-table pages. Skipping that invalidate is only safe if
+ *  your architecture does _NOT_ use the Linux page-tables natively.
+ *
+ *  MMU_GATHER_NO_RANGE
+ *
+ *  Use this if your architecture lacks an efficient flush_tlb_range().
+ */
+
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 /*
  * Semi RCU freeing of the page directories.
@@ -60,11 +182,11 @@ struct mmu_table_batch {
 #define MAX_TABLE_BATCH                \
        ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
 
-extern void tlb_table_flush(struct mmu_gather *tlb);
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
 #endif
 
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
 /*
  * If we can't allocate a page to make a big batch of page pointers
  * to work on, then just handle a few from the on-stack structure.
@@ -89,14 +211,21 @@ struct mmu_gather_batch {
  */
 #define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
 
-/* struct mmu_gather is an opaque type used by the mm code for passing around
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+                                  int page_size);
+#endif
+
+/*
+ * struct mmu_gather is an opaque type used by the mm code for passing around
  * any data needed by arch specific code for tlb_remove_page.
  */
 struct mmu_gather {
        struct mm_struct        *mm;
+
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
        struct mmu_table_batch  *batch;
 #endif
+
        unsigned long           start;
        unsigned long           end;
        /*
@@ -124,23 +253,30 @@ struct mmu_gather {
        unsigned int            cleared_puds : 1;
        unsigned int            cleared_p4ds : 1;
 
+       /*
+        * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
+        */
+       unsigned int            vma_exec : 1;
+       unsigned int            vma_huge : 1;
+
+       unsigned int            batch_count;
+
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
        struct mmu_gather_batch *active;
        struct mmu_gather_batch local;
        struct page             *__pages[MMU_GATHER_BUNDLE];
-       unsigned int            batch_count;
-       int page_size;
-};
 
-#define HAVE_GENERIC_MMU_GATHER
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+       unsigned int page_size;
+#endif
+#endif
+};
 
 void arch_tlb_gather_mmu(struct mmu_gather *tlb,
        struct mm_struct *mm, unsigned long start, unsigned long end);
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
                         unsigned long start, unsigned long end, bool force);
-void tlb_flush_mmu_free(struct mmu_gather *tlb);
-extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
-                                  int page_size);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
                                      unsigned long address,
@@ -163,8 +299,94 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
        tlb->cleared_pmds = 0;
        tlb->cleared_puds = 0;
        tlb->cleared_p4ds = 0;
+       /*
+        * Do not reset mmu_gather::vma_* fields here, we do not
+        * call into tlb_start_vma() again to set them if there is an
+        * intermediate flush.
+        */
+}
+
+#ifdef CONFIG_MMU_GATHER_NO_RANGE
+
+#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
+#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
+#endif
+
+/*
+ * When an architecture does not have an efficient means of range-flushing
+ * TLBs, there is no point in doing intermediate flushes on tlb_end_vma() to
+ * keep the range small, and we don't have to worry about page granularity or
+ * other details.
+ *
+ * All we need to do is issue a full flush for any !0 range.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+       if (tlb->end)
+               flush_tlb_mm(tlb->mm);
+}
+
+static inline void
+tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+
+#define tlb_end_vma tlb_end_vma
+static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+
+#else /* CONFIG_MMU_GATHER_NO_RANGE */
+
+#ifndef tlb_flush
+
+#if defined(tlb_start_vma) || defined(tlb_end_vma)
+#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
+#endif
+
+/*
+ * When an architecture does not provide its own tlb_flush() implementation
+ * but does have a reasonably efficient flush_tlb_range() implementation,
+ * use that.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+       if (tlb->fullmm || tlb->need_flush_all) {
+               flush_tlb_mm(tlb->mm);
+       } else if (tlb->end) {
+               struct vm_area_struct vma = {
+                       .vm_mm = tlb->mm,
+                       .vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
+                                   (tlb->vma_huge ? VM_HUGETLB : 0),
+               };
+
+               flush_tlb_range(&vma, tlb->start, tlb->end);
+       }
 }
 
+static inline void
+tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+       /*
+        * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
+        * mips-4k) flush only large pages.
+        *
+        * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
+        * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
+        * range.
+        *
+        * We rely on tlb_end_vma() to issue a flush, such that when we reset
+        * these values the batch is empty.
+        */
+       tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB);
+       tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
+}
+
+#else
+
+static inline void
+tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+
+#endif
+
+#endif /* CONFIG_MMU_GATHER_NO_RANGE */
+
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
        if (!tlb->end)
@@ -196,21 +418,18 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
        return tlb_remove_page_size(tlb, page, PAGE_SIZE);
 }
 
-#ifndef tlb_remove_check_page_size_change
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+static inline void tlb_change_page_size(struct mmu_gather *tlb,
                                                     unsigned int page_size)
 {
-       /*
-        * We don't care about page size change, just update
-        * mmu_gather page size here so that debug checks
-        * doesn't throw false warning.
-        */
-#ifdef CONFIG_DEBUG_VM
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+       if (tlb->page_size && tlb->page_size != page_size) {
+               if (!tlb->fullmm)
+                       tlb_flush_mmu(tlb);
+       }
+
        tlb->page_size = page_size;
 #endif
 }
-#endif
 
 static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
 {
@@ -237,17 +456,30 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
  * the vmas are adjusted to only cover the region to be torn down.
  */
 #ifndef tlb_start_vma
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#endif
+static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+       if (tlb->fullmm)
+               return;
 
-#define __tlb_end_vma(tlb, vma)                                        \
-       do {                                                    \
-               if (!tlb->fullmm)                               \
-                       tlb_flush_mmu_tlbonly(tlb);             \
-       } while (0)
+       tlb_update_vma_flags(tlb, vma);
+       flush_cache_range(vma, vma->vm_start, vma->vm_end);
+}
+#endif
 
 #ifndef tlb_end_vma
-#define tlb_end_vma    __tlb_end_vma
+static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+       if (tlb->fullmm)
+               return;
+
+       /*
+        * Do a TLB flush and reset the range at VMA boundaries; this avoids
+        * the range growing to cover the unused space between consecutive
+        * VMAs. The mmu_gather::vma_* flags set in tlb_start_vma() also rely
+        * on this flush.
+        */
+       tlb_flush_mmu_tlbonly(tlb);
+}
 #endif
 
 #ifndef __tlb_remove_tlb_entry
@@ -372,6 +604,4 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
 
 #endif /* CONFIG_MMU */
 
-#define tlb_migrate_finish(mm) do {} while (0)
-
 #endif /* _ASM_GENERIC__TLB_H */
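
Putting the documented ordering together, the canonical mmu_gather sequence
looks like the sketch below; real callers live in mm/memory.c, so this is
illustrative only:

    #include <asm/tlb.h>

    static void demo_unmap_range(struct mm_struct *mm,
                                 struct vm_area_struct *vma,
                                 unsigned long start, unsigned long end)
    {
            struct mmu_gather tlb;

            tlb_gather_mmu(&tlb, mm, start, end);
            tlb_start_vma(&tlb, vma);
            /* 1) unhook: clear PTEs, queueing each page with
             *    tlb_remove_page(&tlb, page) */
            tlb_end_vma(&tlb, vma);            /* 2) TLB invalidate the range */
            tlb_finish_mmu(&tlb, start, end);  /* 3) final invalidate + free */
    }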
index cfb7be40bed7a55a453757b30bf20772590b718d..ce4de6b1e444a855d04cc9afe5c7ebca5833a1a6 100644 (file)
@@ -418,6 +418,8 @@ struct drm_crtc_helper_funcs {
         * Drivers can use the @old_crtc_state input parameter if the operations
         * needed to enable the CRTC don't depend solely on the new state but
         * also on the transition between the old state and the new state.
+        *
+        * This function is optional.
         */
        void (*atomic_enable)(struct drm_crtc *crtc,
                              struct drm_crtc_state *old_crtc_state);
@@ -441,6 +443,8 @@ struct drm_crtc_helper_funcs {
         * parameter @old_crtc_state which could be used to access the old
         * state. Atomic drivers should consider to use this one instead
         * of @disable.
+        *
+        * This function is optional.
         */
        void (*atomic_disable)(struct drm_crtc *crtc,
                               struct drm_crtc_state *old_crtc_state);
index cbf3180cb612ed7e54b9aecf79b2690c5a48aed6..668ad971cd7b26828e2d95a4813ec3888d0abdac 100644 (file)
@@ -420,7 +420,6 @@ extern struct ttm_bo_global {
        /**
         * Protected by ttm_global_mutex.
         */
-       unsigned int use_count;
        struct list_head device_list;
 
        /**
diff --git a/include/dt-bindings/clock/sifive-fu540-prci.h b/include/dt-bindings/clock/sifive-fu540-prci.h
new file mode 100644 (file)
index 0000000..6a0b70a
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ * Wesley Terpstra
+ * Paul Walmsley
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_SIFIVE_FU540_PRCI_H
+#define __DT_BINDINGS_CLOCK_SIFIVE_FU540_PRCI_H
+
+/* Clock indexes for use by Device Tree data and the PRCI driver */
+
+#define PRCI_CLK_COREPLL              0
+#define PRCI_CLK_DDRPLL                       1
+#define PRCI_CLK_GEMGXLPLL            2
+#define PRCI_CLK_TLCLK                3
+
+#endif
index 8063e8314eefbfbf75181622465a33f2fbe48ba8..6d487c5eba2cae612e58ef72b8712d8d97aa71b5 100644 (file)
 #define RESET_SD_EMMC_A                        44
 #define RESET_SD_EMMC_B                        45
 #define RESET_SD_EMMC_C                        46
-/*                                     47-60 */
+/*                                     47      */
+#define RESET_USB_PHY20                        48
+#define RESET_USB_PHY21                        49
+/*                                     50-60   */
 #define RESET_AUDIO_CODEC              61
 /*                                     62-63   */
 /*     RESET2                                  */
index adbcb681782604b5356a0b1db56a9a9af6177529..0071298b9b28eb41a313e7f29eb609bd84eb4785 100644 (file)
@@ -38,7 +38,7 @@ enum {
 
 int TSS_authhmac(unsigned char *digest, const unsigned char *key,
                        unsigned int keylen, unsigned char *h1,
-                       unsigned char *h2, unsigned char h3, ...);
+                       unsigned char *h2, unsigned int h3, ...);
 int TSS_checkhmac1(unsigned char *buffer,
                          const uint32_t command,
                          const unsigned char *ononce,
index d5cfc0b15b7640e6c4b99fc05b479f25f5df7f44..f6034ba774be313a86f006ee2e0fbe96403de6f6 100644 (file)
@@ -108,7 +108,7 @@ static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb)
 #define AARP_RESOLVE_TIME      (10 * HZ)
 
 extern struct datalink_proto *ddp_dl, *aarp_dl;
-extern void aarp_proto_init(void);
+extern int aarp_proto_init(void);
 
 /* Inter module exports */
 
index bb6090aa165d362ae399194fdca8d58b7cb8f5bf..e584673c18814295e2c901ab16d69511bc5e7a37 100644 (file)
@@ -120,19 +120,23 @@ static inline bool bio_full(struct bio *bio)
        return bio->bi_vcnt >= bio->bi_max_vecs;
 }
 
-#define mp_bvec_for_each_segment(bv, bvl, i, iter_all)                 \
-       for (bv = bvec_init_iter_all(&iter_all);                        \
-               (iter_all.done < (bvl)->bv_len) &&                      \
-               (mp_bvec_next_segment((bvl), &iter_all), 1);            \
-               iter_all.done += bv->bv_len, i += 1)
+static inline bool bio_next_segment(const struct bio *bio,
+                                   struct bvec_iter_all *iter)
+{
+       if (iter->idx >= bio->bi_vcnt)
+               return false;
+
+       bvec_advance(&bio->bi_io_vec[iter->idx], iter);
+       return true;
+}
 
 /*
  * drivers should _never_ use the all version - the bio may have been split
  * before it got to the driver and the driver won't own all of it
  */
-#define bio_for_each_segment_all(bvl, bio, i, iter_all)                \
-       for (i = 0, iter_all.idx = 0; iter_all.idx < (bio)->bi_vcnt; iter_all.idx++)    \
-               mp_bvec_for_each_segment(bvl, &((bio)->bi_io_vec[iter_all.idx]), i, iter_all)
+#define bio_for_each_segment_all(bvl, bio, i, iter)                    \
+       for (i = 0, bvl = bvec_init_iter_all(&iter);                    \
+            bio_next_segment((bio), &iter); i++)
 
 static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
                                    unsigned bytes)
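
Callers keep the old calling convention; only the iterator state changes. A
typical completion-side loop under the new scheme:

    #include <linux/bio.h>
    #include <linux/mm.h>

    static void demo_mark_pages_dirty(struct bio *bio)
    {
            struct bio_vec *bvec;
            struct bvec_iter_all iter_all;
            int i;

            /* visits every single-page segment, stepping through
             * multi-page bvecs one page at a time */
            bio_for_each_segment_all(bvec, bio, i, iter_all)
                    set_page_dirty_lock(bvec->bv_page);
    }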
index 50fb0dee23e8662120461cd227cf11f548939339..d35b8ec1c485cba58a658b34edd2f9621cd20639 100644 (file)
@@ -34,41 +34,41 @@ static inline u32 __bitrev32(u32 x)
 
 #define __constant_bitrev32(x) \
 ({                                     \
-       u32 __x = x;                    \
-       __x = (__x >> 16) | (__x << 16);        \
-       __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8);      \
-       __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4);      \
-       __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2);      \
-       __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1);      \
-       __x;                                                            \
+       u32 ___x = x;                   \
+       ___x = (___x >> 16) | (___x << 16);     \
+       ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8);   \
+       ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4);   \
+       ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2);   \
+       ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1);   \
+       ___x;                                                           \
 })
 
 #define __constant_bitrev16(x) \
 ({                                     \
-       u16 __x = x;                    \
-       __x = (__x >> 8) | (__x << 8);  \
-       __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4);        \
-       __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2);        \
-       __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1);        \
-       __x;                                                            \
+       u16 ___x = x;                   \
+       ___x = (___x >> 8) | (___x << 8);       \
+       ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4);     \
+       ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2);     \
+       ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1);     \
+       ___x;                                                           \
 })
 
 #define __constant_bitrev8x4(x) \
 ({                     \
-       u32 __x = x;    \
-       __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4);      \
-       __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2);      \
-       __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1);      \
-       __x;                                                            \
+       u32 ___x = x;   \
+       ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4);   \
+       ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2);   \
+       ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1);   \
+       ___x;                                                           \
 })
 
 #define __constant_bitrev8(x)  \
 ({                                     \
-       u8 __x = x;                     \
-       __x = (__x >> 4) | (__x << 4);  \
-       __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2);      \
-       __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1);      \
-       __x;                                                            \
+       u8 ___x = x;                    \
+       ___x = (___x >> 4) | (___x << 4);       \
+       ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2);   \
+       ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1);   \
+       ___x;                                                           \
 })
 
 #define bitrev32(x) \
index b0c814bcc7e3ed903f6b78ca24885ce9c67cba08..db29928de46741b0887e6bd13ed577a59684d180 100644 (file)
@@ -57,7 +57,6 @@ struct blk_mq_hw_ctx {
        unsigned int            queue_num;
 
        atomic_t                nr_active;
-       unsigned int            nr_expired;
 
        struct hlist_node       cpuhp_dead;
        struct kobject          kobj;
@@ -300,11 +299,10 @@ void blk_mq_end_request(struct request *rq, blk_status_t error);
 void __blk_mq_end_request(struct request *rq, blk_status_t error);
 
 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
-                               bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 bool blk_mq_complete_request(struct request *rq);
+void blk_mq_complete_request_sync(struct request *rq);
 bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
                           struct bio *bio);
 bool blk_mq_queue_stopped(struct request_queue *q);
index d66bf5f32610adce133e522b7f3852bd08817ff7..791fee35df8886d11580d4c54f4b55dae64de3db 100644 (file)
@@ -215,6 +215,7 @@ struct bio {
 /*
  * bio flags
  */
+#define BIO_NO_PAGE_REF        0       /* don't put/release bvec pages */
 #define BIO_SEG_VALID  1       /* bi_phys_segments valid */
 #define BIO_CLONED     2       /* doesn't own data */
 #define BIO_BOUNCED    3       /* bio is a bounce bio */
index 0de92b29f589c949307ef716a750fb7f7a671ae7..317ab30d29046baaa29d13d0213ef9c63c4bf95c 100644 (file)
@@ -50,6 +50,9 @@ struct blk_stat_callback;
 /* Must be consistent with blk_mq_poll_stats_bkt() */
 #define BLK_MQ_POLL_STATS_BKTS 16
 
+/* Doing classic polling */
+#define BLK_MQ_POLL_CLASSIC -1
+
 /*
  * Maximum number of blkcg policies allowed to be registered concurrently.
  * Defined here to simplify include dependency.
@@ -545,7 +548,6 @@ struct request_queue {
        struct rcu_head         rcu_head;
        wait_queue_head_t       mq_freeze_wq;
        struct percpu_ref       q_usage_counter;
-       struct list_head        all_q_node;
 
        struct blk_mq_tag_set   *tag_set;
        struct list_head        tag_set_list;
index a2132e09dc1c422731e9533a2a2efa2f5b0132dd..944ccc310201d461e73d738fe8e971aaf9e1b454 100644 (file)
@@ -193,7 +193,6 @@ enum bpf_arg_type {
 
        ARG_PTR_TO_CTX,         /* pointer to context */
        ARG_ANYTHING,           /* any (initialized) argument is ok */
-       ARG_PTR_TO_SOCKET,      /* pointer to bpf_sock */
        ARG_PTR_TO_SPIN_LOCK,   /* pointer to bpf_spin_lock */
        ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
 };
@@ -511,7 +510,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
                }                                       \
 _out:                                                  \
                rcu_read_unlock();                      \
-               preempt_enable_no_resched();            \
+               preempt_enable();                       \
                _ret;                                   \
         })
 
index 69f7a3449eda83a8a25fd1f5ac3dedc36108deba..7d8228d1c8981d9b73fb72a8953687c1f550eb19 100644 (file)
@@ -66,6 +66,46 @@ struct bpf_reg_state {
         * same reference to the socket, to determine proper reference freeing.
         */
        u32 id;
+       /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
+        * from a pointer-cast helper, bpf_sk_fullsock() and
+        * bpf_tcp_sock().
+        *
+        * Consider the following where "sk" is a reference counted
+        * pointer returned from "sk = bpf_sk_lookup_tcp();":
+        *
+        * 1: sk = bpf_sk_lookup_tcp();
+        * 2: if (!sk) { return 0; }
+        * 3: fullsock = bpf_sk_fullsock(sk);
+        * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
+        * 5: tp = bpf_tcp_sock(fullsock);
+        * 6: if (!tp) { bpf_sk_release(sk); return 0; }
+        * 7: bpf_sk_release(sk);
+        * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
+        *
+        * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and
+        * "tp" ptr should be invalidated also.  In order to do that,
+        * the reg holding "fullsock" and "sk" need to remember
+        * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
+        * such that the verifier can reset all regs which have
+        * ref_obj_id matching the sk_reg->id.
+        *
+        * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
+        * sk_reg->id will stay as NULL-marking purpose only.
+        * After NULL-marking is done, sk_reg->id can be reset to 0.
+        *
+        * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
+        * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
+        *
+        * After "tp = bpf_tcp_sock(fullsock);" at line 5,
+        * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
+        * which is the same as sk_reg->ref_obj_id.
+        *
+        * From the verifier perspective, if sk, fullsock and tp
+        * are not NULL, they are the same ptr with different
+        * reg->type.  In particular, bpf_sk_release(tp) is also
+        * allowed and has the same effect as bpf_sk_release(sk).
+        */
+       u32 ref_obj_id;
        /* For scalar types (SCALAR_VALUE), this represents our knowledge of
         * the actual value.
         * For pointer types, this represents the variable part of the offset
index 9cd00a37b8d32b83e3539ed5ffce694c63efc8f0..6db2d9a6e503106261e042e5d65fdc68aa92194b 100644 (file)
 #define BCM_LED_SRC_OFF                0xe     /* Tied high */
 #define BCM_LED_SRC_ON         0xf     /* Tied low */
 
+/*
+ * Broadcom Multicolor LED configurations (expansion register 4)
+ */
+#define BCM_EXP_MULTICOLOR             (MII_BCM54XX_EXP_SEL_ER + 0x04)
+#define BCM_LED_MULTICOLOR_IN_PHASE    BIT(8)
+#define BCM_LED_MULTICOLOR_LINK_ACT    0x0
+#define BCM_LED_MULTICOLOR_SPEED       0x1
+#define BCM_LED_MULTICOLOR_ACT_FLASH   0x2
+#define BCM_LED_MULTICOLOR_FDX         0x3
+#define BCM_LED_MULTICOLOR_OFF         0x4
+#define BCM_LED_MULTICOLOR_ON          0x5
+#define BCM_LED_MULTICOLOR_ALT         0x6
+#define BCM_LED_MULTICOLOR_FLASH       0x7
+#define BCM_LED_MULTICOLOR_LINK                0x8
+#define BCM_LED_MULTICOLOR_ACT         0x9
+#define BCM_LED_MULTICOLOR_PROGRAM     0xa
 
 /*
  * BCM5482: Shadow registers
index f6275c4da13a765fd60f3d34a9ca491d1d56db93..ff13cbc1887db7a3e2d4fe9f64916c34a8a44a49 100644 (file)
@@ -145,26 +145,33 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv,
 
 static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all)
 {
-       iter_all->bv.bv_page = NULL;
        iter_all->done = 0;
+       iter_all->idx = 0;
 
        return &iter_all->bv;
 }
 
-static inline void mp_bvec_next_segment(const struct bio_vec *bvec,
-                                       struct bvec_iter_all *iter_all)
+static inline void bvec_advance(const struct bio_vec *bvec,
+                               struct bvec_iter_all *iter_all)
 {
        struct bio_vec *bv = &iter_all->bv;
 
-       if (bv->bv_page) {
+       if (iter_all->done) {
                bv->bv_page = nth_page(bv->bv_page, 1);
                bv->bv_offset = 0;
        } else {
-               bv->bv_page = bvec->bv_page;
-               bv->bv_offset = bvec->bv_offset;
+               bv->bv_page = bvec_nth_page(bvec->bv_page, bvec->bv_offset /
+                                           PAGE_SIZE);
+               bv->bv_offset = bvec->bv_offset & ~PAGE_MASK;
        }
        bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset,
                           bvec->bv_len - iter_all->done);
+       iter_all->done += bv->bv_len;
+
+       if (iter_all->done == bvec->bv_len) {
+               iter_all->idx++;
+               iter_all->done = 0;
+       }
 }
 
 /*
index a420c07904bcd7f6e7a082ebe0e78d913f94674e..337d5049ff93b5849b925086f33d509f29da5efa 100644 (file)
@@ -294,6 +294,8 @@ extern void ceph_destroy_client(struct ceph_client *client);
 extern int __ceph_open_session(struct ceph_client *client,
                               unsigned long started);
 extern int ceph_open_session(struct ceph_client *client);
+int ceph_wait_for_latest_osdmap(struct ceph_client *client,
+                               unsigned long timeout);
 
 /* pagevec.c */
 extern void ceph_release_page_vector(struct page **pages, int num_pages);
index d8bc1a856b39c88c0731ceed0d93ef80a4bbb16a..f689fc58d7be3bf8f1841f532227bddce0cb5d52 100644 (file)
@@ -811,6 +811,22 @@ static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
        return true;
 }
 
+static inline int clk_set_rate_range(struct clk *clk, unsigned long min,
+                                    unsigned long max)
+{
+       return 0;
+}
+
+static inline int clk_set_min_rate(struct clk *clk, unsigned long rate)
+{
+       return 0;
+}
+
+static inline int clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+       return 0;
+}
+
 static inline int clk_set_parent(struct clk *clk, struct clk *parent)
 {
        return 0;
index 445348facea97d2755f371198ca5730302c3f1ba..d58aa0db05f9438cbe3639a7d47788b6acdd4f33 100644 (file)
@@ -67,7 +67,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
                                .line = __LINE__,                       \
                        };                                              \
                ______r = !!(cond);                                     \
-               ______f.miss_hit[______r]++;                                    \
+               ______r ? ______f.miss_hit[1]++ : ______f.miss_hit[0]++;\
                ______r;                                                \
        }))
 #endif /* CONFIG_PROFILE_ALL_BRANCHES */
index 5041357d0297afdce8b5605970947b34f85e7205..732745f865b7e02f94832c128f0d8b4a83ea2ed9 100644 (file)
@@ -137,9 +137,26 @@ static inline int disable_nonboot_cpus(void)
        return freeze_secondary_cpus(0);
 }
 extern void enable_nonboot_cpus(void);
+
+static inline int suspend_disable_secondary_cpus(void)
+{
+       int cpu = 0;
+
+       if (IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU))
+               cpu = -1;
+
+       return freeze_secondary_cpus(cpu);
+}
+static inline void suspend_enable_secondary_cpus(void)
+{
+       return enable_nonboot_cpus();
+}
+
 #else /* !CONFIG_PM_SLEEP_SMP */
 static inline int disable_nonboot_cpus(void) { return 0; }
 static inline void enable_nonboot_cpus(void) {}
+static inline int suspend_disable_secondary_cpus(void) { return 0; }
+static inline void suspend_enable_secondary_cpus(void) { }
 #endif /* !CONFIG_PM_SLEEP_SMP */
 
 void cpu_startup_entry(enum cpuhp_state state);
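A hedged sketch of the call pattern these wrappers are meant for in a suspend/hibernate path (the function name here is hypothetical; the real callers are converted elsewhere in this merge):

static int example_enter_sleep(void)
{
	int error;

	/* Freezes all but one CPU; with CONFIG_PM_SLEEP_SMP_NONZERO_CPU
	 * the surviving CPU is not required to be CPU0. */
	error = suspend_disable_secondary_cpus();
	if (error)
		return error;

	/* ... platform enters the sleep state ... */

	suspend_enable_secondary_cpus();
	return 0;
}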
@@ -175,6 +192,7 @@ enum cpuhp_smt_control {
        CPU_SMT_DISABLED,
        CPU_SMT_FORCE_DISABLED,
        CPU_SMT_NOT_SUPPORTED,
+       CPU_SMT_NOT_IMPLEMENTED,
 };
 
 #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
@@ -182,9 +200,33 @@ extern enum cpuhp_smt_control cpu_smt_control;
 extern void cpu_smt_disable(bool force);
 extern void cpu_smt_check_topology(void);
 #else
-# define cpu_smt_control               (CPU_SMT_ENABLED)
+# define cpu_smt_control               (CPU_SMT_NOT_IMPLEMENTED)
 static inline void cpu_smt_disable(bool force) { }
 static inline void cpu_smt_check_topology(void) { }
 #endif
 
+/*
+ * These are used for a global "mitigations=" cmdline option for toggling
+ * optional CPU mitigations.
+ */
+enum cpu_mitigations {
+       CPU_MITIGATIONS_OFF,
+       CPU_MITIGATIONS_AUTO,
+       CPU_MITIGATIONS_AUTO_NOSMT,
+};
+
+extern enum cpu_mitigations cpu_mitigations;
+
+/* mitigations=off */
+static inline bool cpu_mitigations_off(void)
+{
+       return cpu_mitigations == CPU_MITIGATIONS_OFF;
+}
+
+/* mitigations=auto,nosmt */
+static inline bool cpu_mitigations_auto_nosmt(void)
+{
+       return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
+}
+
 #endif /* _LINUX_CPU_H_ */
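An illustrative arch-side consumer of these helpers; example_select_mitigation() is a hypothetical stand-in for speculation-bug setup code:

static void example_select_mitigation(void)
{
	/* mitigations=off: leave the CPU bug unmitigated */
	if (cpu_mitigations_off())
		return;

	/* ... apply the default (auto) mitigation here ... */

	/* mitigations=auto,nosmt: additionally disable SMT */
	if (cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}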
index b425a7ee04ce4d2a56f4ed951ae4c7f6779818fd..4e6987e11f688bc12001cc4d030e8a3a99faa514 100644 (file)
@@ -49,8 +49,6 @@ struct bus_attribute {
        ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
 };
 
-#define BUS_ATTR(_name, _mode, _show, _store)  \
-       struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
 #define BUS_ATTR_RW(_name) \
        struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
 #define BUS_ATTR_RO(_name) \
index c46fdb36700bc2d83115245e660853ffc5a47141..8de8c4f15163a9ecc7e9459df85976b1eff0b614 100644 (file)
@@ -102,9 +102,7 @@ const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list);
 extern const char * dmi_get_system_info(int field);
 extern const struct dmi_device * dmi_find_device(int type, const char *name,
        const struct dmi_device *from);
-extern void dmi_scan_machine(void);
-extern void dmi_memdev_walk(void);
-extern void dmi_set_dump_stack_arch_desc(void);
+extern void dmi_setup(void);
 extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp);
 extern int dmi_get_bios_year(void);
 extern int dmi_name_in_vendors(const char *str);
@@ -122,9 +120,7 @@ static inline int dmi_check_system(const struct dmi_system_id *list) { return 0;
 static inline const char * dmi_get_system_info(int field) { return NULL; }
 static inline const struct dmi_device * dmi_find_device(int type, const char *name,
        const struct dmi_device *from) { return NULL; }
-static inline void dmi_scan_machine(void) { return; }
-static inline void dmi_memdev_walk(void) { }
-static inline void dmi_set_dump_stack_arch_desc(void) { }
+static inline void dmi_setup(void) { }
 static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
 {
        if (yearp)
index 54357a258b358aa3961151c025cfef621ce5cbef..6ebc2098cfe1719a16ba177c567ed5a916955bb4 100644 (file)
@@ -1611,7 +1611,12 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
                           struct screen_info *si, efi_guid_t *proto,
                           unsigned long size);
 
-bool efi_runtime_disabled(void);
+#ifdef CONFIG_EFI
+extern bool efi_runtime_disabled(void);
+#else
+static inline bool efi_runtime_disabled(void) { return true; }
+#endif
+
 extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
 extern unsigned long efi_call_virt_save_flags(void);
 
index 2e9e2763bf47dbea239976034e32fd11f14826f7..6e8bc53740f050f63883ea6b7d077e5911bdca9f 100644 (file)
@@ -31,6 +31,7 @@ struct elevator_mq_ops {
        void (*exit_sched)(struct elevator_queue *);
        int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
        void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
+       void (*depth_updated)(struct blk_mq_hw_ctx *);
 
        bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
        bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
index e2f3b21cd72a28d16cf2324d308e13dc64c86f59..aa8bfd6f738c7fac838b31de87ec390cf0a81d3c 100644 (file)
@@ -448,6 +448,18 @@ static inline void eth_addr_dec(u8 *addr)
        u64_to_ether_addr(u, addr);
 }
 
+/**
+ * eth_addr_inc() - Increment the given MAC address.
+ * @addr: Pointer to a six-byte array containing the Ethernet address to increment.
+ */
+static inline void eth_addr_inc(u8 *addr)
+{
+       u64 u = ether_addr_to_u64(addr);
+
+       u++;
+       u64_to_ether_addr(u, addr);
+}
+
 /**
  * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
  * @dev: Pointer to a device structure
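As a usage sketch, hypothetical driver code deriving sequential MAC addresses for secondary ports from a base address (base_mac and n_ports are assumptions of this sketch):

u8 mac[ETH_ALEN];
int i;

ether_addr_copy(mac, base_mac);
for (i = 0; i < n_ports; i++) {
	eth_addr_inc(mac);	/* port i gets base_mac + i + 1 */
	/* ... program mac into port i ... */
}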
index 8b42df09b04c9c222e3fb863daf60bef0b116a7f..dd28e7679089128a75d5ed86f5f6f435422d77eb 100644 (file)
@@ -158,6 +158,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 #define FMODE_OPENED           ((__force fmode_t)0x80000)
 #define FMODE_CREATED          ((__force fmode_t)0x100000)
 
+/* File is stream-like */
+#define FMODE_STREAM           ((__force fmode_t)0x200000)
+
 /* File was opened by fanotify and shouldn't generate fanotify events */
 #define FMODE_NONOTIFY         ((__force fmode_t)0x4000000)
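A sketch of a character-device open handler adopting the new flag via stream_open(), declared in the hunk below (the driver name is hypothetical):

static int example_open(struct inode *inode, struct file *filp)
{
	/* Marks the file FMODE_STREAM: no f_pos, and reads and
	 * writes on it may run in parallel. */
	return stream_open(inode, filp);
}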
 
@@ -3074,6 +3077,7 @@ extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
 extern loff_t no_seek_end_llseek(struct file *, loff_t, int);
 extern int generic_file_open(struct inode * inode, struct file * filp);
 extern int nonseekable_open(struct inode * inode, struct file * filp);
+extern int stream_open(struct inode * inode, struct file * filp);
 
 #ifdef CONFIG_BLOCK
 typedef void (dio_submit_t)(struct bio *bio, struct inode *inode,
index 730876187344a2904896dfa2bbf6b8459736c177..20899919ead8ade88ee64346ccb1c362018ebf20 100644 (file)
@@ -241,21 +241,11 @@ static inline void ftrace_free_mem(struct module *mod, void *start, void *end) {
 
 #ifdef CONFIG_STACK_TRACER
 
-#define STACK_TRACE_ENTRIES 500
-
-struct stack_trace;
-
-extern unsigned stack_trace_index[];
-extern struct stack_trace stack_trace_max;
-extern unsigned long stack_trace_max_size;
-extern arch_spinlock_t stack_trace_max_lock;
-
 extern int stack_tracer_enabled;
-void stack_trace_print(void);
-int
-stack_trace_sysctl(struct ctl_table *table, int write,
-                  void __user *buffer, size_t *lenp,
-                  loff_t *ppos);
+
+int stack_trace_sysctl(struct ctl_table *table, int write,
+                      void __user *buffer, size_t *lenp,
+                      loff_t *ppos);
 
 /* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
 DECLARE_PER_CPU(int, disable_stack_tracer);
index ea35263eb76b76e796f2f3ffaa3378585b98db82..11943b60f2084cb5e69c6dd5553f2adf0ea4f8ca 100644 (file)
@@ -203,7 +203,6 @@ static inline void hugetlb_show_meminfo(void)
 #define pud_huge(x)    0
 #define is_hugepage_only_range(mm, addr, len)  0
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
-#define hugetlb_fault(mm, vma, addr, flags)    ({ BUG(); 0; })
 #define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
                                src_addr, pagep)        ({ BUG(); 0; })
 #define huge_pte_offset(mm, address, sz)       0
@@ -234,6 +233,13 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
 {
        BUG();
 }
+static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
+                               struct vm_area_struct *vma, unsigned long address,
+                               unsigned int flags)
+{
+       BUG();
+       return 0;
+}
 
 #endif /* !CONFIG_HUGETLB_PAGE */
 /*
index d6160d479b14520485b3d2ba2c1bafb56491d2ee..7ae8de5ad0f2fae4a5c50ad30903ecdf631bb0fd 100644 (file)
@@ -195,7 +195,7 @@ struct irq_data {
  * IRQD_LEVEL                  - Interrupt is level triggered
  * IRQD_WAKEUP_STATE           - Interrupt is configured for wakeup
  *                               from suspend
- * IRDQ_MOVE_PCNTXT            - Interrupt can be moved in process
+ * IRQD_MOVE_PCNTXT            - Interrupt can be moved in process
  *                               context
  * IRQD_IRQ_DISABLED           - Disabled state of the interrupt
  * IRQD_IRQ_MASKED             - Masked state of the interrupt
index 626179077bb03d1b97ea7e2f42c95b2a4e79b1a0..0f049b384ccddf7325eb5449c82eda9d8dab63cb 100644 (file)
@@ -158,8 +158,7 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq);
  * Legacy platforms not converted to DT yet must use this to init
  * their GIC
  */
-void gic_init(unsigned int nr, int start,
-             void __iomem *dist , void __iomem *cpu);
+void gic_init(void __iomem *dist, void __iomem *cpu);
 
 int gicv2m_init(struct fwnode_handle *parent_handle,
                struct irq_domain *parent);
index a49f2b45b3f0ff4965dc1e0ea6bc49707759eaed..42710d5949ba3654a4812d7050ce97c1c218a610 100644 (file)
@@ -12,21 +12,79 @@ struct static_key_deferred {
        struct delayed_work work;
 };
 
-extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
-extern void static_key_deferred_flush(struct static_key_deferred *key);
+struct static_key_true_deferred {
+       struct static_key_true key;
+       unsigned long timeout;
+       struct delayed_work work;
+};
+
+struct static_key_false_deferred {
+       struct static_key_false key;
+       unsigned long timeout;
+       struct delayed_work work;
+};
+
+#define static_key_slow_dec_deferred(x)                                        \
+       __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout)
+#define static_branch_slow_dec_deferred(x)                             \
+       __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout)
+
+#define static_key_deferred_flush(x)                                   \
+       __static_key_deferred_flush((x), &(x)->work)
+
+extern void
+__static_key_slow_dec_deferred(struct static_key *key,
+                              struct delayed_work *work,
+                              unsigned long timeout);
+extern void __static_key_deferred_flush(void *key, struct delayed_work *work);
 extern void
 jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
 
+extern void jump_label_update_timeout(struct work_struct *work);
+
+#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl)                      \
+       struct static_key_true_deferred name = {                        \
+               .key =          { STATIC_KEY_INIT_TRUE },               \
+               .timeout =      (rl),                                   \
+               .work = __DELAYED_WORK_INITIALIZER((name).work,         \
+                                                  jump_label_update_timeout, \
+                                                  0),                  \
+       }
+
+#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl)                     \
+       struct static_key_false_deferred name = {                       \
+               .key =          { STATIC_KEY_INIT_FALSE },              \
+               .timeout =      (rl),                                   \
+               .work = __DELAYED_WORK_INITIALIZER((name).work,         \
+                                                  jump_label_update_timeout, \
+                                                  0),                  \
+       }
+
+#define static_branch_deferred_inc(x)  static_branch_inc(&(x)->key)
+
 #else  /* !CONFIG_JUMP_LABEL */
 struct static_key_deferred {
        struct static_key  key;
 };
+struct static_key_true_deferred {
+       struct static_key_true key;
+};
+struct static_key_false_deferred {
+       struct static_key_false key;
+};
+#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl)      \
+       struct static_key_true_deferred name = { STATIC_KEY_TRUE_INIT }
+#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl)     \
+       struct static_key_false_deferred name = { STATIC_KEY_FALSE_INIT }
+
+#define static_branch_slow_dec_deferred(x)     static_branch_dec(&(x)->key)
+
 static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
 {
        STATIC_KEY_CHECK_USE(key);
        static_key_slow_dec(&key->key);
 }
-static inline void static_key_deferred_flush(struct static_key_deferred *key)
+static inline void static_key_deferred_flush(void *key)
 {
        STATIC_KEY_CHECK_USE(key);
 }
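A minimal usage sketch of the new deferred static branches (the key name and the HZ timeout are illustrative):

DEFINE_STATIC_KEY_DEFERRED_FALSE(example_key, HZ);

static void example_user_add(void)
{
	static_branch_deferred_inc(&example_key);	/* enables immediately */
}

static void example_user_del(void)
{
	/* the expensive text patching is rate-limited by the timeout */
	static_branch_slow_dec_deferred(&example_key);
}

static bool example_fast_path(void)
{
	return static_branch_unlikely(&example_key.key);
}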
index 8c3f8c14eeaafd5b1035f4ef2fec81384f94f77c..da676cdbd7277e32feb96a56063167afc0964215 100644 (file)
@@ -38,22 +38,13 @@ struct vmcoredd_node {
 
 #ifdef CONFIG_PROC_KCORE
 void __init kclist_add(struct kcore_list *, void *, size_t, int type);
-static inline
-void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
-{
-       m->vaddr = (unsigned long)vaddr;
-       kclist_add(m, addr, sz, KCORE_REMAP);
-}
+
+extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn));
 #else
 static inline
 void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
 {
 }
-
-static inline
-void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
-{
-}
 #endif
 
 #endif /* _LINUX_KCORE_H */
index 34a5036debd341935a100b6fe4a7083db6262d5e..2d14e21c16c0b412535d00b3a537bf3353366d4a 100644 (file)
@@ -47,8 +47,8 @@
 
 #define u64_to_user_ptr(x) (           \
 {                                      \
-       typecheck(u64, x);              \
-       (void __user *)(uintptr_t)x;    \
+       typecheck(u64, (x));            \
+       (void __user *)(uintptr_t)(x);  \
 }                                      \
 )
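The added parentheses matter when the macro argument is an expression; a sketch of the hazard the fix removes (base and off are hypothetical u64 values):

/* The old expansion of u64_to_user_ptr(base + off) was effectively
 *   ((void __user *)(uintptr_t)base) + off
 * i.e. byte arithmetic on the casted base rather than a cast of the
 * full sum (the two only coincide because of GNU void * byte
 * arithmetic); the new form yields
 *   (void __user *)(uintptr_t)(base + off)
 */
void __user *p = u64_to_user_ptr(base + off);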
 
index 201f0f2683f25bd382267042f9f7dbec8460f093..9a897256e481f311a1de41e448c52710d2c2f247 100644 (file)
@@ -173,6 +173,7 @@ struct kretprobe_instance {
        struct kretprobe *rp;
        kprobe_opcode_t *ret_addr;
        struct task_struct *task;
+       void *fp;
        char data[0];
 };
 
index 9d55c63db09b5dcb9ac997d802cb00ff356d4353..640a03642766bb4ae02c86e3606318c80adaf81d 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/irqbypass.h>
 #include <linux/swait.h>
 #include <linux/refcount.h>
+#include <linux/nospec.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -513,10 +514,10 @@ static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
 
 static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
 {
-       /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
-        * the caller has read kvm->online_vcpus before (as is the case
-        * for kvm_for_each_vcpu, for example).
-        */
+       int num_vcpus = atomic_read(&kvm->online_vcpus);
+       i = array_index_nospec(i, num_vcpus);
+
+       /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
        smp_rmb();
        return kvm->vcpus[i];
 }
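For reference, a self-contained illustration of the array_index_nospec() clamp used above (the function and table are hypothetical; assumes <linux/nospec.h>):

static int example_read_slot(const int *table, int nr, int idx)
{
	if (idx < 0 || idx >= nr)
		return -EINVAL;
	/* clamps idx to [0, nr) without opening a speculation window */
	idx = array_index_nospec(idx, nr);
	return table[idx];
}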
@@ -600,6 +601,7 @@ void kvm_put_kvm(struct kvm *kvm);
 
 static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
 {
+       as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
        return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
                        lockdep_is_held(&kvm->slots_lock) ||
                        !refcount_read(&kvm->users_count));
index 79626b5ab36cce2492a406c3ff4f50023c4c2150..58aa3adf94e63585631876b9a880c454fbd905df 100644 (file)
@@ -207,7 +207,7 @@ static inline void list_bulk_move_tail(struct list_head *head,
 }
 
 /**
- * list_is_first -- tests whether @ list is the first entry in list @head
+ * list_is_first -- tests whether @list is the first entry in list @head
  * @list: the entry to test
  * @head: the head of the list
  */
index 79c3873d58acc81b8de2e0e3d25e503a6e9deef3..6e2377e6c1d6013d01457b452952cc038be0e07b 100644 (file)
@@ -66,6 +66,11 @@ struct lock_class_key {
 
 extern struct lock_class_key __lockdep_no_validate__;
 
+struct lock_trace {
+       unsigned int            nr_entries;
+       unsigned int            offset;
+};
+
 #define LOCKSTAT_POINTS                4
 
 /*
@@ -100,7 +105,7 @@ struct lock_class {
         * IRQ/softirq usage tracking bits:
         */
        unsigned long                   usage_mask;
-       struct stack_trace              usage_traces[XXX_LOCK_USAGE_STATES];
+       struct lock_trace               usage_traces[XXX_LOCK_USAGE_STATES];
 
        /*
         * Generation counter, when doing certain classes of graph walking,
@@ -188,7 +193,7 @@ struct lock_list {
        struct list_head                entry;
        struct lock_class               *class;
        struct lock_class               *links_to;
-       struct stack_trace              trace;
+       struct lock_trace               trace;
        int                             distance;
 
        /*
@@ -471,7 +476,7 @@ struct pin_cookie { };
 
 #define NIL_COOKIE (struct pin_cookie){ }
 
-#define lockdep_pin_lock(l)                    ({ struct pin_cookie cookie; cookie; })
+#define lockdep_pin_lock(l)                    ({ struct pin_cookie cookie = { }; cookie; })
 #define lockdep_repin_lock(l, c)               do { (void)(l); (void)(c); } while (0)
 #define lockdep_unpin_lock(l, c)               do { (void)(l); (void)(c); } while (0)
 
index 1f3d880b7ca1736057546bc0b98997b6b8a65aea..dbb6118370c1e3c6df88b7da5695aea57dcd08ad 100644 (file)
@@ -566,7 +566,10 @@ struct mem_cgroup *lock_page_memcg(struct page *page);
 void __unlock_page_memcg(struct mem_cgroup *memcg);
 void unlock_page_memcg(struct page *page);
 
-/* idx can be of type enum memcg_stat_item or node_stat_item */
+/*
+ * idx can be of type enum memcg_stat_item or node_stat_item.
+ * Keep in sync with memcg_exact_page_state().
+ */
 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
                                             int idx)
 {
index 6fee8b1a4400842a7db69b0a08d6bc3edf436854..5cd824c1c0caa8c9adda4a8e6d640f43605cd4fb 100644 (file)
@@ -469,7 +469,7 @@ static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
        if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
                              advertising))
                lcl_adv |= ADVERTISE_PAUSE_CAP;
-       if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
                              advertising))
                lcl_adv |= ADVERTISE_PAUSE_ASYM;
 
index 022541dc5dbfd7b12a54601c1d1a59e30eed8a37..0d07296488448b28b608cc7795de638aef185ba6 100644 (file)
@@ -594,6 +594,8 @@ enum mlx5_pagefault_type_flags {
 };
 
 struct mlx5_td {
+       /* protects tirs list changes while tirs refresh */
+       struct mutex     list_lock;
        struct list_head tirs_list;
        u32              tdn;
 };
index b26ea90773840eb9a6b127abc56b9334c67fb9a6..0343c81d4c5f5d7d994718c1f645014241314b2a 100644 (file)
@@ -557,7 +557,8 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev,
 
 int mlx5_core_create_dct(struct mlx5_core_dev *dev,
                         struct mlx5_core_dct *qp,
-                        u32 *in, int inlen);
+                        u32 *in, int inlen,
+                        u32 *out, int outlen);
 int mlx5_core_create_qp(struct mlx5_core_dev *dev,
                        struct mlx5_core_qp *qp,
                        u32 *in,
index 76769749b5a5d546daf7f7513318ad65a494da4c..6b10c21630f54bdd14ddd6efa1510777165ec558 100644 (file)
@@ -966,6 +966,10 @@ static inline bool is_pci_p2pdma_page(const struct page *page)
 }
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
+/* 127: arbitrary random number, small enough to assemble well */
+#define page_ref_zero_or_close_to_overflow(page) \
+       ((unsigned int) page_ref_count(page) + 127u <= 127u)
+
 static inline void get_page(struct page *page)
 {
        page = compound_head(page);
@@ -973,8 +977,17 @@ static inline void get_page(struct page *page)
         * Getting a normal page or the head of a compound page
         * requires to already have an elevated page->_refcount.
         */
-       VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
+       VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
+       page_ref_inc(page);
+}
+
+static inline __must_check bool try_get_page(struct page *page)
+{
+       page = compound_head(page);
+       if (WARN_ON_ONCE(page_ref_count(page) <= 0))
+               return false;
        page_ref_inc(page);
+       return true;
 }
 
 static inline void put_page(struct page *page)
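A hedged sketch of the GUP-style caller try_get_page() enables, taking a reference that may fail instead of tripping the overflow check (the function name is hypothetical):

static struct page *example_grab_page(struct page *page)
{
	if (!try_get_page(page))
		return NULL;	/* refcount was zero or close to overflow */
	/* ... use the page; the caller must put_page() when done ... */
	return page;
}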
index 7eade9132f02e4f85f423404341c6241d335f4d4..4ef4bbe78a1da163fee585597b57ff215c819be6 100644 (file)
@@ -671,7 +671,7 @@ enum vm_fault_reason {
 
 /* Encode hstate index for a hwpoisoned large page */
 #define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
-#define VM_FAULT_GET_HINDEX(x) (((x) >> 16) & 0xf)
+#define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)
 
 #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS |       \
                        VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON |  \
index 651fca72286c4838307b8fe83d78bbda2dc81015..c606c72311d0e08564b274fba3aaa2d2cb3a6e7c 100644 (file)
@@ -83,6 +83,12 @@ enum sock_type {
 
 #endif /* ARCH_HAS_SOCKET_TYPES */
 
+/**
+ * enum sock_shutdown_cmd - Shutdown types
+ * @SHUT_RD: shutdown receptions
+ * @SHUT_WR: shutdown transmissions
+ * @SHUT_RDWR: shutdown receptions/transmissions
+ */
 enum sock_shutdown_cmd {
        SHUT_RD,
        SHUT_WR,
index 26f69cf763f43dd1e0d61c0692f649aab21baeea..324e872c91d15407b6804c297de56cd480b215f8 100644 (file)
@@ -1500,6 +1500,7 @@ struct net_device_ops {
  * @IFF_FAILOVER: device is a failover master device
  * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
  * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
+ * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
  */
 enum netdev_priv_flags {
        IFF_802_1Q_VLAN                 = 1<<0,
@@ -1532,6 +1533,7 @@ enum netdev_priv_flags {
        IFF_FAILOVER                    = 1<<27,
        IFF_FAILOVER_SLAVE              = 1<<28,
        IFF_L3MDEV_RX_HANDLER           = 1<<29,
+       IFF_LIVE_RENAME_OK              = 1<<30,
 };
 
 #define IFF_802_1Q_VLAN                        IFF_802_1Q_VLAN
@@ -1563,6 +1565,7 @@ enum netdev_priv_flags {
 #define IFF_FAILOVER                   IFF_FAILOVER
 #define IFF_FAILOVER_SLAVE             IFF_FAILOVER_SLAVE
 #define IFF_L3MDEV_RX_HANDLER          IFF_L3MDEV_RX_HANDLER
+#define IFF_LIVE_RENAME_OK             IFF_LIVE_RENAME_OK
 
 /**
  *     struct net_device - The DEVICE structure.
index baa49e6a23cc7a16092962e4d4a6bfbba9132376..c40720cb59acc4190d40aafdfe23b42ef8159cf9 100644 (file)
@@ -967,8 +967,13 @@ struct nvme_get_log_page_command {
        __le16                  numdl;
        __le16                  numdu;
        __u16                   rsvd11;
-       __le32                  lpol;
-       __le32                  lpou;
+       union {
+               struct {
+                       __le32 lpol;
+                       __le32 lpou;
+               };
+               __le64 lpo;
+       };
        __u32                   rsvd14[2];
 };
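Illustratively, the union lets a caller set the 64-bit log page offset with a single store (offset is an assumed u64):

struct nvme_get_log_page_command c = { };

c.lpo = cpu_to_le64(offset);	/* previously split across lpol and lpou */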
 
index 4eb26d2780460a6a821f110ce903ad2a1a54ccce..280ae96dc4c300d29418ececeb5e9b91acf27e2e 100644 (file)
@@ -41,16 +41,6 @@ int move_freepages_block(struct zone *zone, struct page *page,
 
 /*
  * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
- * If specified range includes migrate types other than MOVABLE or CMA,
- * this will fail with -EBUSY.
- *
- * For isolating all pages in the range finally, the caller have to
- * free all pages in the range. test_page_isolated() can be used for
- * test it.
- *
- * The following flags are allowed (they can be combined in a bit mask)
- * SKIP_HWPOISON - ignore hwpoison pages
- * REPORT_FAILURE - report details about the failure to isolate the range
  */
 int
 start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
index f41f1d041e2c5e325e4d817781e13ed60695276c..397607a0c0ebef2969a3c4ea81893be860c8aacc 100644 (file)
@@ -460,7 +460,6 @@ extern size_t parport_ieee1284_epp_read_addr (struct parport *,
                                              void *, size_t, int);
 
 /* IEEE1284.3 functions */
-#define daisy_dev_name "Device ID probe"
 extern int parport_daisy_init (struct parport *port);
 extern void parport_daisy_fini (struct parport *port);
 extern struct pardevice *parport_open (int devnum, const char *name);
@@ -469,18 +468,6 @@ extern ssize_t parport_device_id (int devnum, char *buffer, size_t len);
 extern void parport_daisy_deselect_all (struct parport *port);
 extern int parport_daisy_select (struct parport *port, int daisy, int mode);
 
-#ifdef CONFIG_PARPORT_1284
-extern int daisy_drv_init(void);
-extern void daisy_drv_exit(void);
-#else
-static inline int daisy_drv_init(void)
-{
-       return 0;
-}
-
-static inline void daisy_drv_exit(void) {}
-#endif
-
 /* Lowlevel drivers _can_ call this support function to handle irqs.  */
 static inline void parport_generic_irq(struct parport *port)
 {
index e47ef764f613ed5231121e8347575e37a53d5b1d..15a82ff0aefe8be73ffe262cd76d7c7ce3ff7a00 100644 (file)
@@ -240,7 +240,6 @@ struct perf_event;
 #define PERF_PMU_CAP_NO_INTERRUPT              0x01
 #define PERF_PMU_CAP_NO_NMI                    0x02
 #define PERF_PMU_CAP_AUX_NO_SG                 0x04
-#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF          0x08
 #define PERF_PMU_CAP_EXCLUSIVE                 0x10
 #define PERF_PMU_CAP_ITRACE                    0x20
 #define PERF_PMU_CAP_HETEROGENEOUS_CPUS                0x40
@@ -464,7 +463,7 @@ enum perf_addr_filter_action_t {
 /**
  * struct perf_addr_filter - address range filter definition
  * @entry:     event's filter list linkage
- * @inode:     object file's inode for file-based filters
+ * @path:      object file's path for file-based filters
  * @offset:    filter range offset
  * @size:      filter range size (size==0 means single address trigger)
  * @action:    filter/start/stop
@@ -888,6 +887,9 @@ extern void perf_sched_cb_dec(struct pmu *pmu);
 extern void perf_sched_cb_inc(struct pmu *pmu);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
+
+extern void perf_pmu_resched(struct pmu *pmu);
+
 extern int perf_event_refresh(struct perf_event *event, int refresh);
 extern void perf_event_update_userpage(struct perf_event *event);
 extern int perf_event_release_kernel(struct perf_event *event);
@@ -1055,12 +1057,18 @@ static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned lo
 #endif
 
 /*
- * Take a snapshot of the regs. Skip ip and frame pointer to
- * the nth caller. We only need a few of the regs:
+ * When generating a perf sample in-line, instead of from an interrupt /
+ * exception, we lack a pt_regs. This is typically used from software events
+ * like SW_CONTEXT_SWITCHES, SW_MIGRATIONS, and the tie-in with tracepoints.
+ *
+ * We typically don't need a full set, but (for x86) do require:
  * - ip for PERF_SAMPLE_IP
  * - cs for user_mode() tests
- * - bp for callchains
- * - eflags, for future purposes, just in case
+ * - sp for PERF_SAMPLE_CALLCHAIN
+ * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
+ *
+ * NOTE: assumes @regs is otherwise already 0 filled; this is important for
+ * things like PERF_SAMPLE_REGS_INTR.
  */
 static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 {
index 787d224ff43e1fc72ceed0164bf26a4f2fa08794..5c626fdc10dbd27d6f87f290cf5dbd50d0244528 100644 (file)
@@ -101,18 +101,20 @@ struct pipe_buf_operations {
        /*
         * Get a reference to the pipe buffer.
         */
-       void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+       bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
 };
 
 /**
  * pipe_buf_get - get a reference to a pipe_buffer
  * @pipe:      the pipe that the buffer belongs to
  * @buf:       the buffer to get a reference to
+ *
+ * Return: %true if the reference was successfully obtained.
  */
-static inline void pipe_buf_get(struct pipe_inode_info *pipe,
+static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe,
                                struct pipe_buffer *buf)
 {
-       buf->ops->get(pipe, buf);
+       return buf->ops->get(pipe, buf);
 }
 
 /**
@@ -171,9 +173,10 @@ struct pipe_inode_info *alloc_pipe_info(void);
 void free_pipe_info(struct pipe_inode_info *);
 
 /* Generic pipe buffer ops functions */
-void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
+bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *);
 void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
 void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);
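An illustrative caller-side adaptation to the now-fallible pipe_buf_get(); the error code is an assumption of this sketch, real callers choose their own:

static int example_dup_pipe_buf(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	if (!pipe_buf_get(pipe, buf))
		return -EIO;	/* page refcount saturated: refuse to duplicate */
	/* ... hand the extra reference to the consumer ... */
	return 0;
}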
 
index a867637e172d75cbe77a6593309c30cb82a1d197..9e46678edb2aff1e5f0a0d2247e969829551adca 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL+ */
+/* SPDX-License-Identifier: GPL-2.0+ */
 
 /*
  * AMD FCH gpio driver platform-data
index 3ab892208343c2d22d71a630f1097d6941534518..7a37ac27d0fb21d9f8afde973c7618e42574534b 100644 (file)
@@ -35,10 +35,13 @@ struct pmc_clk {
  *
  * @base:      PMC clock register base offset
  * @clks:      pointer to set of registered clocks, typically 0..5
+ * @critical:  flag to indicate whether firmware-enabled pmc_plt_clks
+ *             should be marked as critical or not
  */
 struct pmc_clk_data {
        void __iomem *base;
        const struct pmc_clk *clks;
+       bool critical;
 };
 
 #endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */
index edb9b040c94c31fe1ff91242232c10d0aea514ac..d5084ebd9f03045e7706872d0db88c3b06d88847 100644 (file)
@@ -9,6 +9,13 @@
 #include <linux/bug.h>                 /* For BUG_ON.  */
 #include <linux/pid_namespace.h>       /* For task_active_pid_ns.  */
 #include <uapi/linux/ptrace.h>
+#include <linux/seccomp.h>
+
+/* Add sp to seccomp_data; since seccomp is a user API, we don't want to modify it */
+struct syscall_info {
+       __u64                   sp;
+       struct seccomp_data     data;
+};
 
 extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
                            void *buf, int len, unsigned int gup_flags);
@@ -407,9 +414,7 @@ static inline void user_single_step_report(struct pt_regs *regs)
 #define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
 #endif
 
-extern int task_current_syscall(struct task_struct *target, long *callno,
-                               unsigned long args[6], unsigned int maxargs,
-                               unsigned long *sp, unsigned long *pc);
+extern int task_current_syscall(struct task_struct *target, struct syscall_info *info);
 
 extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
 #endif
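A hedged sketch of the updated caller side (the reporting function and format string are illustrative):

static void example_report_syscall(struct task_struct *task)
{
	struct syscall_info info = {};

	if (task_current_syscall(task, &info))
		return;		/* target is running or unreachable */

	pr_info("nr=%d sp=%llx ip=%llx\n", info.data.nr,
		(unsigned long long)info.sp,
		(unsigned long long)info.data.instruction_pointer);
}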
index 6cdb1db776cf9a634673277363b116a7fc7f8093..922bb6848813301c29211f0aca1dbfe79de02f4c 100644 (file)
@@ -878,9 +878,11 @@ static inline void rcu_head_init(struct rcu_head *rhp)
 static inline bool
 rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
 {
-       if (READ_ONCE(rhp->func) == f)
+       rcu_callback_t func = READ_ONCE(rhp->func);
+
+       if (func == f)
                return true;
-       WARN_ON_ONCE(READ_ONCE(rhp->func) != (rcu_callback_t)~0L);
+       WARN_ON_ONCE(func != (rcu_callback_t)~0L);
        return false;
 }
 
index 90bfa3279a01c555746ea3fc1a80b9e6c6523426..563290fc194f247d92ab2cd430fcf99d8706f725 100644 (file)
@@ -18,7 +18,7 @@
  * awoken.
  */
 struct rcuwait {
-       struct task_struct *task;
+       struct task_struct __rcu *task;
 };
 
 #define __RCUWAIT_INITIALIZER(name)            \
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
deleted file mode 100644 (file)
index e475683..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* rwsem-spinlock.h: fallback C implementation
- *
- * Copyright (c) 2001   David Howells (dhowells@redhat.com).
- * - Derived partially from ideas by Andrea Arcangeli <andrea@suse.de>
- * - Derived also from comments by Linus
- */
-
-#ifndef _LINUX_RWSEM_SPINLOCK_H
-#define _LINUX_RWSEM_SPINLOCK_H
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include linux/rwsem-spinlock.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-/*
- * the rw-semaphore definition
- * - if count is 0 then there are no active readers or writers
- * - if count is +ve then that is the number of active readers
- * - if count is -1 then there is one active writer
- * - if wait_list is not empty, then there are processes waiting for the semaphore
- */
-struct rw_semaphore {
-       __s32                   count;
-       raw_spinlock_t          wait_lock;
-       struct list_head        wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-       struct lockdep_map dep_map;
-#endif
-};
-
-#define RWSEM_UNLOCKED_VALUE           0x00000000
-
-extern void __down_read(struct rw_semaphore *sem);
-extern int __must_check __down_read_killable(struct rw_semaphore *sem);
-extern int __down_read_trylock(struct rw_semaphore *sem);
-extern void __down_write(struct rw_semaphore *sem);
-extern int __must_check __down_write_killable(struct rw_semaphore *sem);
-extern int __down_write_trylock(struct rw_semaphore *sem);
-extern void __up_read(struct rw_semaphore *sem);
-extern void __up_write(struct rw_semaphore *sem);
-extern void __downgrade_write(struct rw_semaphore *sem);
-extern int rwsem_is_locked(struct rw_semaphore *sem);
-
-#endif /* __KERNEL__ */
-#endif /* _LINUX_RWSEM_SPINLOCK_H */
index 67dbb57508b1f2824338b3169da8887a19cc551a..2ea18a3def045b4f71ccea1cf428a92d8d50734b 100644 (file)
 #include <linux/osq_lock.h>
 #endif
 
-struct rw_semaphore;
-
-#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
-#include <linux/rwsem-spinlock.h> /* use a generic implementation */
-#define __RWSEM_INIT_COUNT(name)       .count = RWSEM_UNLOCKED_VALUE
-#else
-/* All arch specific implementations share the same struct */
+/*
+ * For an uncontended rwsem, count and owner are the only fields a task
+ * needs to touch when acquiring the rwsem. So they are put next to each
+ * other to increase the chance that they will share the same cacheline.
+ *
+ * In a contended rwsem, the owner is likely the most frequently accessed
+ * field in the structure as the optimistic waiter that holds the osq lock
+ * will spin on owner. For an embedded rwsem, other hot fields in the
+ * containing structure should be moved further away from the rwsem to
+ * reduce the chance that they will share the same cacheline, causing
+ * a cacheline bouncing problem.
+ */
 struct rw_semaphore {
        atomic_long_t count;
-       struct list_head wait_list;
-       raw_spinlock_t wait_lock;
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
-       struct optimistic_spin_queue osq; /* spinner MCS lock */
        /*
         * Write owner. Used as a speculative check to see
         * if the owner is running on the cpu.
         */
        struct task_struct *owner;
+       struct optimistic_spin_queue osq; /* spinner MCS lock */
 #endif
+       raw_spinlock_t wait_lock;
+       struct list_head wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
 #endif
@@ -50,24 +55,14 @@ struct rw_semaphore {
  */
 #define RWSEM_OWNER_UNKNOWN    ((struct task_struct *)-2L)
 
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-/* Include the arch specific part */
-#include <asm/rwsem.h>
-
 /* In all implementations count != 0 means locked */
 static inline int rwsem_is_locked(struct rw_semaphore *sem)
 {
        return atomic_long_read(&sem->count) != 0;
 }
 
+#define RWSEM_UNLOCKED_VALUE           0L
 #define __RWSEM_INIT_COUNT(name)       .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
-#endif
 
 /* Common initializer macros and functions */
 
index 14d558146aea20e9ccc5267bd6a30856bce61406..20f3e3f029b9cadde3586aa725632871ee4d4a3e 100644 (file)
@@ -330,7 +330,7 @@ static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
 /*
  * This one is special, since it doesn't actually clear the bit, rather it
  * sets the corresponding bit in the ->cleared mask instead. Paired with
- * the caller doing sbitmap_batch_clear() if a given index is full, which
+ * the caller doing sbitmap_deferred_clear() if a given index is full, which
  * will clear the previously freed entries in the corresponding ->word.
  */
 static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
index 1549584a15388a21b6a3d02938da2cfa50c8ccce..50606a6e73d686ea6a3dad3c1d7342f620cdea7b 100644 (file)
@@ -1057,7 +1057,6 @@ struct task_struct {
 
 #ifdef CONFIG_RSEQ
        struct rseq __user *rseq;
-       u32 rseq_len;
        u32 rseq_sig;
        /*
         * RmW on rseq_event_mask must be performed atomically
@@ -1855,12 +1854,10 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
 {
        if (clone_flags & CLONE_THREAD) {
                t->rseq = NULL;
-               t->rseq_len = 0;
                t->rseq_sig = 0;
                t->rseq_event_mask = 0;
        } else {
                t->rseq = current->rseq;
-               t->rseq_len = current->rseq_len;
                t->rseq_sig = current->rseq_sig;
                t->rseq_event_mask = current->rseq_event_mask;
        }
@@ -1869,7 +1866,6 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
 static inline void rseq_execve(struct task_struct *t)
 {
        t->rseq = NULL;
-       t->rseq_len = 0;
        t->rseq_sig = 0;
        t->rseq_event_mask = 0;
 }
index 0cd9f10423fb8e60645685ab5bdbad675d803a51..a3fda9f024c3c1988b6ff60954d7f7e74a9c1ecf 100644 (file)
@@ -49,6 +49,27 @@ static inline void mmdrop(struct mm_struct *mm)
                __mmdrop(mm);
 }
 
+/*
+ * This has to be called after a get_task_mm()/mmget_not_zero()
+ * followed by taking the mmap_sem for writing before modifying the
+ * vmas or anything the coredump pretends not to change from under it.
+ *
+ * NOTE: find_extend_vma() called from GUP context is the only place
+ * that can modify the "mm" (notably the vm_start/end) under mmap_sem
+ * for reading and outside the context of the process, so it is also
+ * the only case that holds the mmap_sem for reading that must call
+ * this function. Generally, if the mmap_sem is held for reading,
+ * there is no need for this check after get_task_mm()/mmget_not_zero().
+ *
+ * This function can be obsoleted and the check can be removed once
+ * the coredump code holds the mmap_sem for writing before
+ * invoking the ->core_dump methods.
+ */
+static inline bool mmget_still_valid(struct mm_struct *mm)
+{
+       return likely(!mm->core_state);
+}
+
 /**
  * mmget() - Pin the address space associated with a &struct mm_struct.
  * @mm: The address space to pin.
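A hedged sketch of the caller pattern the comment above describes (the function name and error codes are illustrative):

static int example_modify_vmas(struct mm_struct *mm)
{
	int ret = -EINTR;

	if (!mmget_not_zero(mm))
		return -ESRCH;
	down_write(&mm->mmap_sem);
	if (!mmget_still_valid(mm))
		goto out;	/* racing with a coredump: leave the vmas alone */
	/* ... safe to modify vmas here ... */
	ret = 0;
out:
	up_write(&mm->mmap_sem);
	mmput(mm);
	return ret;
}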
index ae56551976983dc508d3f3d63903d3a284a5f3a4..e412c092c1e821edd18f0c9dc0ed734ced5d6234 100644 (file)
@@ -418,10 +418,20 @@ static inline void set_restore_sigmask(void)
        set_thread_flag(TIF_RESTORE_SIGMASK);
        WARN_ON(!test_thread_flag(TIF_SIGPENDING));
 }
+
+static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
+{
+       clear_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
+}
+
 static inline void clear_restore_sigmask(void)
 {
        clear_thread_flag(TIF_RESTORE_SIGMASK);
 }
+static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
+{
+       return test_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
+}
 static inline bool test_restore_sigmask(void)
 {
        return test_thread_flag(TIF_RESTORE_SIGMASK);
@@ -439,6 +449,10 @@ static inline void set_restore_sigmask(void)
        current->restore_sigmask = true;
        WARN_ON(!test_thread_flag(TIF_SIGPENDING));
 }
+static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
+{
+       tsk->restore_sigmask = false;
+}
 static inline void clear_restore_sigmask(void)
 {
        current->restore_sigmask = false;
@@ -447,6 +461,10 @@ static inline bool test_restore_sigmask(void)
 {
        return current->restore_sigmask;
 }
+static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
+{
+       return tsk->restore_sigmask;
+}
 static inline bool test_and_clear_restore_sigmask(void)
 {
        if (!current->restore_sigmask)
index 57c7ed3fe46590cd6d4efe387b2133911bb293ed..cfc0a89a715981ac1d7765257077740930d2fa25 100644 (file)
@@ -76,8 +76,8 @@ struct sched_domain_shared {
 
 struct sched_domain {
        /* These fields must be setup */
-       struct sched_domain *parent;    /* top domain must be null terminated */
-       struct sched_domain *child;     /* bottom domain must be null terminated */
+       struct sched_domain __rcu *parent;      /* top domain must be null terminated */
+       struct sched_domain __rcu *child;       /* bottom domain must be null terminated */
        struct sched_group *groups;     /* the balancing groups of the domain */
        unsigned long min_interval;     /* Minimum balance interval ms */
        unsigned long max_interval;     /* Maximum balance interval ms */
index f3fb1edb3526ddc0c582f0ad32017ab7eaf21dd3..20d815a331454f93e7a66d808a5a5f84601e9a58 100644 (file)
@@ -21,6 +21,7 @@ struct shmem_inode_info {
        struct list_head        swaplist;       /* chain of maybes on swap */
        struct shared_policy    policy;         /* NUMA memory alloc policy */
        struct simple_xattrs    xattrs;         /* list of xattrs */
+       atomic_t                stop_eviction;  /* hold when working on inode */
        struct inode            vfs_inode;
 };
 
index 11b45f7ae4057c3b70105974b8f527dd6f30d8ba..9449b19c5f107a73bfe7eca9fe875708862dc3b9 100644 (file)
@@ -32,6 +32,8 @@
 #define SLAB_HWCACHE_ALIGN     ((slab_flags_t __force)0x00002000U)
 /* Use GFP_DMA memory */
 #define SLAB_CACHE_DMA         ((slab_flags_t __force)0x00004000U)
+/* Use GFP_DMA32 memory */
+#define SLAB_CACHE_DMA32       ((slab_flags_t __force)0x00008000U)
 /* DEBUG: Store the last owner for bug hunting */
 #define SLAB_STORE_USER                ((slab_flags_t __force)0x00010000U)
 /* Panic if kmem_cache_create() fails */
index d0884b52500100b92ae4829e070ab3a9993af4b3..9d1bc65d226cc00b8cbd73484279ed3d6aa852d9 100644 (file)
@@ -29,7 +29,7 @@ struct smpboot_thread_data;
  * @thread_comm:       The base name of the thread
  */
 struct smp_hotplug_thread {
-       struct task_struct __percpu     **store;
+       struct task_struct              * __percpu *store;
        struct list_head                list;
        int                             (*thread_should_run)(unsigned int cpu);
        void                            (*thread_fn)(unsigned int cpu);
index 6016daeecee41f28511f46c8618fbf05a16aff8a..b57cd8bf96e2b67c6588716cdfbd92dba58d7b0f 100644 (file)
@@ -26,7 +26,7 @@ typedef __kernel_sa_family_t  sa_family_t;
 /*
  *     1003.1g requires sa_family_t and that sa_data is char.
  */
+
 struct sockaddr {
        sa_family_t     sa_family;      /* address family, AF_xxx       */
        char            sa_data[14];    /* 14 bytes of protocol address */
@@ -44,7 +44,7 @@ struct linger {
  *     system, not 4.3. Thus msg_accrights(len) are now missing. They
  *     belong in an obscure libc emulation or the bin.
  */
+
 struct msghdr {
        void            *msg_name;      /* ptr to socket address structure */
        int             msg_namelen;    /* size of socket address structure */
@@ -54,7 +54,7 @@ struct msghdr {
        unsigned int    msg_flags;      /* flags on received message */
        struct kiocb    *msg_iocb;      /* ptr to iocb for async requests */
 };
+
 struct user_msghdr {
        void            __user *msg_name;       /* ptr to socket address structure */
        int             msg_namelen;            /* size of socket address structure */
@@ -122,7 +122,7 @@ struct cmsghdr {
  *     inside range, given by msg->msg_controllen before using
  *     ancillary object DATA.                          --ANK (980731)
  */
+
 static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
                                               struct cmsghdr *__cmsg)
 {
@@ -264,10 +264,10 @@ struct ucred {
 /* Maximum queue length specifiable by listen.  */
 #define SOMAXCONN      128
 
-/* Flags we can use with send/ and recv. 
+/* Flags we can use with send/ and recv.
    Added those for 1003.1g not all are supported yet
  */
+
 #define MSG_OOB                1
 #define MSG_PEEK       2
 #define MSG_DONTROUTE  4
index c495b2d51569e7b453ee9c0ad7e51012f0f4c220..e432cc92c73de7d1ae73aa5b69486e8311547d82 100644 (file)
@@ -56,45 +56,11 @@ struct srcu_struct { };
 
 void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
                void (*func)(struct rcu_head *head));
-void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced);
+void cleanup_srcu_struct(struct srcu_struct *ssp);
 int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
 void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
 void synchronize_srcu(struct srcu_struct *ssp);
 
-/**
- * cleanup_srcu_struct - deconstruct a sleep-RCU structure
- * @ssp: structure to clean up.
- *
- * Must invoke this after you are finished using a given srcu_struct that
- * was initialized via init_srcu_struct(), else you leak memory.
- */
-static inline void cleanup_srcu_struct(struct srcu_struct *ssp)
-{
-       _cleanup_srcu_struct(ssp, false);
-}
-
-/**
- * cleanup_srcu_struct_quiesced - deconstruct a quiesced sleep-RCU structure
- * @ssp: structure to clean up.
- *
- * Must invoke this after you are finished using a given srcu_struct that
- * was initialized via init_srcu_struct(), else you leak memory.  Also,
- * all grace-period processing must have completed.
- *
- * "Completed" means that the last synchronize_srcu() and
- * synchronize_srcu_expedited() calls must have returned before the call
- * to cleanup_srcu_struct_quiesced().  It also means that the callback
- * from the last call_srcu() must have been invoked before the call to
- * cleanup_srcu_struct_quiesced(), but you can use srcu_barrier() to help
- * with this last.  Violating these rules will get you a WARN_ON() splat
- * (with high probability, anyway), and will also cause the srcu_struct
- * to be leaked.
- */
-static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *ssp)
-{
-       _cleanup_srcu_struct(ssp, true);
-}
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 /**
index 7978b3e2c1e130cac7bfce4c24c40e6ec5f0f4a3..0805dee1b6b897f637c5d0eb67e75ac264c23900 100644 (file)
 
 typedef u32 depot_stack_handle_t;
 
-struct stack_trace;
+depot_stack_handle_t stack_depot_save(unsigned long *entries,
+                                     unsigned int nr_entries, gfp_t gfp_flags);
 
-depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags);
-
-void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace);
+unsigned int stack_depot_fetch(depot_stack_handle_t handle,
+                              unsigned long **entries);
 
 #endif
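A sketch pairing the new depot API with the array-based stacktrace API introduced in this series (KASAN-style usage; function names are illustrative):

static depot_stack_handle_t example_save(void)
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr, GFP_NOWAIT);
}

static void example_show(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr;

	nr = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, nr, 0);
}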
index ba29a0613e66ffa83162692f05e367e7dd129d82..f0cfd12cb45eb5e849e9e753cab8e9a701879719 100644 (file)
@@ -3,11 +3,64 @@
 #define __LINUX_STACKTRACE_H
 
 #include <linux/types.h>
+#include <asm/errno.h>
 
 struct task_struct;
 struct pt_regs;
 
 #ifdef CONFIG_STACKTRACE
+void stack_trace_print(unsigned long *trace, unsigned int nr_entries,
+                      int spaces);
+int stack_trace_snprint(char *buf, size_t size, unsigned long *entries,
+                       unsigned int nr_entries, int spaces);
+unsigned int stack_trace_save(unsigned long *store, unsigned int size,
+                             unsigned int skipnr);
+unsigned int stack_trace_save_tsk(struct task_struct *task,
+                                 unsigned long *store, unsigned int size,
+                                 unsigned int skipnr);
+unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
+                                  unsigned int size, unsigned int skipnr);
+unsigned int stack_trace_save_user(unsigned long *store, unsigned int size);
+
+/* Internal interfaces. Do not use in generic code */
+#ifdef CONFIG_ARCH_STACKWALK
+
+/**
+ * stack_trace_consume_fn - Callback for arch_stack_walk()
+ * @cookie:    Caller supplied pointer handed back by arch_stack_walk()
+ * @addr:      The stack entry address to consume
+ * @reliable:  True when the stack entry is reliable. Required by
+ *             some printk based consumers.
+ *
+ * Return:     True, if the entry was consumed or skipped
+ *             False, if there is no space left to store
+ */
+typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr,
+                                      bool reliable);
+/**
+ * arch_stack_walk - Architecture specific function to walk the stack
+ * @consume_entry:     Callback which is invoked by the architecture code for
+ *                     each entry.
+ * @cookie:            Caller supplied pointer which is handed back to
+ *                     @consume_entry
+ * @task:              Pointer to a task struct, can be NULL
+ * @regs:              Pointer to registers, can be NULL
+ *
+ * ============ ======= ============================================
+ * task                regs
+ * ============ ======= ============================================
+ * task                NULL    Stack trace from task (can be current)
+ * current     regs    Stack trace starting at regs->stackpointer
+ * ============ ======= ============================================
+ */
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+                    struct task_struct *task, struct pt_regs *regs);
+int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie,
+                            struct task_struct *task);
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+                         const struct pt_regs *regs);
+
+#else /* CONFIG_ARCH_STACKWALK */
 struct stack_trace {
        unsigned int nr_entries, max_entries;
        unsigned long *entries;
@@ -21,24 +74,20 @@ extern void save_stack_trace_tsk(struct task_struct *tsk,
                                struct stack_trace *trace);
 extern int save_stack_trace_tsk_reliable(struct task_struct *tsk,
                                         struct stack_trace *trace);
-
-extern void print_stack_trace(struct stack_trace *trace, int spaces);
-extern int snprint_stack_trace(char *buf, size_t size,
-                       struct stack_trace *trace, int spaces);
-
-#ifdef CONFIG_USER_STACKTRACE_SUPPORT
 extern void save_stack_trace_user(struct stack_trace *trace);
+#endif /* !CONFIG_ARCH_STACKWALK */
+#endif /* CONFIG_STACKTRACE */
+
+#if defined(CONFIG_STACKTRACE) && defined(CONFIG_HAVE_RELIABLE_STACKTRACE)
+int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
+                                 unsigned int size);
 #else
-# define save_stack_trace_user(trace)              do { } while (0)
+static inline int stack_trace_save_tsk_reliable(struct task_struct *tsk,
+                                               unsigned long *store,
+                                               unsigned int size)
+{
+       return -ENOSYS;
+}
 #endif
 
-#else /* !CONFIG_STACKTRACE */
-# define save_stack_trace(trace)                       do { } while (0)
-# define save_stack_trace_tsk(tsk, trace)              do { } while (0)
-# define save_stack_trace_user(trace)                  do { } while (0)
-# define print_stack_trace(trace, spaces)              do { } while (0)
-# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0)
-# define save_stack_trace_tsk_reliable(tsk, trace)     ({ -ENOSYS; })
-#endif /* CONFIG_STACKTRACE */
-
 #endif /* __LINUX_STACKTRACE_H */
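
For reference, the new flat-array API above replaces struct stack_trace with a bare entries buffer plus a count. A minimal sketch of a converted caller, assuming CONFIG_STACKTRACE=y; it mirrors the backtracetest.c conversion later in this series:

#include <linux/stacktrace.h>
#include <linux/kernel.h>

static void example_dump_stack(void)
{
	unsigned long entries[16];
	unsigned int nr_entries;

	/* Capture up to 16 return addresses; skipnr=0 keeps all frames. */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

	/* Print the captured trace with no extra indentation. */
	stack_trace_print(entries, nr_entries, 0);
}
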
index 7927b875f80cf6ff74425e6d6a990aa27a032e21..6ab0a6fa512e75882f62a517e22f53ad946b2356 100644 (file)
@@ -150,6 +150,9 @@ extern void * memscan(void *,int,__kernel_size_t);
 #ifndef __HAVE_ARCH_MEMCMP
 extern int memcmp(const void *,const void *,__kernel_size_t);
 #endif
+#ifndef __HAVE_ARCH_BCMP
+extern int bcmp(const void *,const void *,__kernel_size_t);
+#endif
 #ifndef __HAVE_ARCH_MEMCHR
 extern void * memchr(const void *,int,__kernel_size_t);
 #endif
index ec861cd0cfe8ce9fa5425909d243c660231f06c9..52d41d0c1ae1d54b6829a10134318ee2835b9fd5 100644 (file)
@@ -304,12 +304,4 @@ rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
 }
 #endif /* CONFIG_SUNRPC_SWAP */
 
-static inline bool
-rpc_task_need_resched(const struct rpc_task *task)
-{
-       if (RPC_IS_QUEUED(task) || task->tk_callback)
-               return true;
-       return false;
-}
-
 #endif /* _LINUX_SUNRPC_SCHED_H_ */
index 37b226e8df13f3b6235277485519b5de37cf6fe2..2b70130af58578da68627927201efd1c5160900a 100644 (file)
@@ -268,6 +268,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
 #define user_access_end() do { } while (0)
 #define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
 #define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
+static inline unsigned long user_access_save(void) { return 0UL; }
+static inline void user_access_restore(unsigned long flags) { }
 #endif
 
 #ifdef CONFIG_HARDENED_USERCOPY
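
A brief sketch of the intended save/restore pattern, assuming a hook (for instance an instrumentation report function) that may be entered while a user_access_begin() region is active; with the stubs above it compiles to a no-op on architectures that have no user-access state:

static void example_report_hook(void)
{
	unsigned long ua_flags;

	/* Stash and disable the current user-access state (e.g. x86 AC). */
	ua_flags = user_access_save();

	/* ... work that must not run with user access enabled ... */

	/* Put back whatever state the interrupted code had. */
	user_access_restore(ua_flags);
}
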
index 87477e1640f9217223f7cbcde6b3fa416ef58ac5..2d0131ad46041dd8f036cc4063f2702a16f0827d 100644 (file)
@@ -23,14 +23,23 @@ struct kvec {
 };
 
 enum iter_type {
-       ITER_IOVEC = 0,
-       ITER_KVEC = 2,
-       ITER_BVEC = 4,
-       ITER_PIPE = 8,
-       ITER_DISCARD = 16,
+       /* set if ITER_BVEC doesn't hold a bv_page ref */
+       ITER_BVEC_FLAG_NO_REF = 2,
+
+       /* iter types */
+       ITER_IOVEC = 4,
+       ITER_KVEC = 8,
+       ITER_BVEC = 16,
+       ITER_PIPE = 32,
+       ITER_DISCARD = 64,
 };
 
 struct iov_iter {
+       /*
+        * Bit 0 is the read/write bit, set if we're writing.
+        * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and
+        * the caller isn't expecting to drop a page reference when done.
+        */
        unsigned int type;
        size_t iov_offset;
        size_t count;
@@ -51,7 +60,7 @@ struct iov_iter {
 
 static inline enum iter_type iov_iter_type(const struct iov_iter *i)
 {
-       return i->type & ~(READ | WRITE);
+       return i->type & ~(READ | WRITE | ITER_BVEC_FLAG_NO_REF);
 }
 
 static inline bool iter_is_iovec(const struct iov_iter *i)
@@ -84,6 +93,11 @@ static inline unsigned char iov_iter_rw(const struct iov_iter *i)
        return i->type & (READ | WRITE);
 }
 
+static inline bool iov_iter_bvec_no_ref(const struct iov_iter *i)
+{
+       return (i->type & ITER_BVEC_FLAG_NO_REF) != 0;
+}
+
 /*
  * Total number of bytes covered by an iovec.
  *
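
Given the bit layout described in the comment above (bit 0: READ/WRITE, bit 1: ITER_BVEC_FLAG_NO_REF, one-hot type bits from bit 2 up), the accessors reduce to simple masks. A hedged sketch of a consumer deciding whether to drop page references; the function is illustrative, not from the patch:

#include <linux/uio.h>
#include <linux/mm.h>

static void example_put_pages(struct iov_iter *i, struct page **pages, int n)
{
	/*
	 * iov_iter_type() masks off READ/WRITE (bit 0) and
	 * ITER_BVEC_FLAG_NO_REF (bit 1), leaving the one-hot type bit.
	 */
	if (iov_iter_type(i) == ITER_BVEC && iov_iter_bvec_no_ref(i))
		return;	/* the bvec owner keeps its own page references */

	while (n--)
		put_page(pages[n]);
}
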
index 5e49e82c43684854c379e18a1d698d79ac4ef347..ff010d1fd1c787f4e2f1a973fc5c07350769dece 100644 (file)
@@ -200,7 +200,6 @@ usb_find_last_int_out_endpoint(struct usb_host_interface *alt,
  * @dev: driver model's view of this device
  * @usb_dev: if an interface is bound to the USB major, this will point
  *     to the sysfs representation for that device.
- * @pm_usage_cnt: PM usage counter for this interface
  * @reset_ws: Used for scheduling resets from atomic context.
  * @resetting_device: USB core reset the device, so use alt setting 0 as
  *     current; needs bandwidth alloc after reset.
@@ -257,7 +256,6 @@ struct usb_interface {
 
        struct device dev;              /* interface specific device info */
        struct device *usb_dev;
-       atomic_t pm_usage_cnt;          /* usage counter for autosuspend */
        struct work_struct reset_ws;    /* for resets in atomic context */
 };
 #define        to_usb_interface(d) container_of(d, struct usb_interface, dev)
index a240ed2a0372c20281e03a45fe49dea6a2fd60a3..ff56c443180cd6d35ec6f354ea2d519ce5443b83 100644 (file)
@@ -24,15 +24,17 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...);
 #define vbg_debug pr_debug
 #endif
 
-int vbg_hgcm_connect(struct vbg_dev *gdev,
+int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
                     struct vmmdev_hgcm_service_location *loc,
                     u32 *client_id, int *vbox_status);
 
-int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status);
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
+                       u32 client_id, int *vbox_status);
 
-int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
-                 u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
-                 u32 parm_count, int *vbox_status);
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+                 u32 function, u32 timeout_ms,
+                 struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
+                 int *vbox_status);
 
 /**
  * Convert a VirtualBox status code to a standard Linux kernel return value.
index fab02133a9197a43cd9df33728e657e1679c5e16..3dc70adfe5f5edbbfa6f6508849d1d3630a46e2b 100644 (file)
@@ -63,7 +63,7 @@ struct virtqueue;
 /*
  * Creates a virtqueue and allocates the descriptor ring.  If
  * may_reduce_num is set, then this may allocate a smaller ring than
- * expected.  The caller should query virtqueue_get_ring_size to learn
+ * expected.  The caller should query virtqueue_get_vring_size to learn
  * the actual size of the ring.
  */
 struct virtqueue *vring_create_virtqueue(unsigned int index,
index 23f61850f3639ae1686f9f83e0bac6e6a5015ba5..1832402324cef7a00cc1ca97327aa21dd65e836b 100644 (file)
@@ -35,6 +35,7 @@ struct charlcd_ops {
 };
 
 struct charlcd *charlcd_alloc(unsigned int drvdata_size);
+void charlcd_free(struct charlcd *lcd);
 
 int charlcd_register(struct charlcd *lcd);
 int charlcd_unregister(struct charlcd *lcd);
index c745e9ccfab2d6f86a58c1a038e416fe29a63d67..c61a1bf4e3de544dd41886e9126b7ff20eb829c3 100644 (file)
@@ -39,7 +39,7 @@ struct tc_action {
        struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw;
        struct gnet_stats_queue __percpu *cpu_qstats;
        struct tc_cookie        __rcu *act_cookie;
-       struct tcf_chain        *goto_chain;
+       struct tcf_chain        __rcu *goto_chain;
 };
 #define tcf_index      common.tcfa_index
 #define tcf_refcnt     common.tcfa_refcnt
@@ -90,7 +90,7 @@ struct tc_action_ops {
        int     (*lookup)(struct net *net, struct tc_action **a, u32 index);
        int     (*init)(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **act, int ovr,
-                       int bind, bool rtnl_held,
+                       int bind, bool rtnl_held, struct tcf_proto *tp,
                        struct netlink_ext_ack *extack);
        int     (*walk)(struct net *, struct sk_buff *,
                        struct netlink_callback *, int,
@@ -181,6 +181,11 @@ int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
 int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
 int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
 
+int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
+                            struct tcf_chain **handle,
+                            struct netlink_ext_ack *newchain);
+struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
+                                        struct tcf_chain *newchain);
 #endif /* CONFIG_NET_CLS_ACT */
 
 static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
index 2bfb87eb98ce15cd693819d42205a036ae6dd42f..78c856cba4f538c078fada09ef3238c2bc220069 100644 (file)
@@ -61,10 +61,12 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
                               rxrpc_user_attach_call_t, unsigned long, gfp_t,
                               unsigned int);
 void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
-u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
+bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *,
+                            u32 *);
 void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
 u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
 bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
                                 ktime_t *);
+bool rxrpc_kernel_call_is_complete(struct rxrpc_call *);
 
 #endif /* _NET_RXRPC_H */
index bb307a11ee636b7194bbe7d31c83f3d798b3379a..13bfeb712d36943cf9c04111a777d39cf08034a9 100644 (file)
@@ -7183,6 +7183,11 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
 #define wiphy_info(wiphy, format, args...)                     \
        dev_info(&(wiphy)->dev, format, ##args)
 
+#define wiphy_err_ratelimited(wiphy, format, args...)          \
+       dev_err_ratelimited(&(wiphy)->dev, format, ##args)
+#define wiphy_warn_ratelimited(wiphy, format, args...)         \
+       dev_warn_ratelimited(&(wiphy)->dev, format, ##args)
+
 #define wiphy_debug(wiphy, format, args...)                    \
        wiphy_printk(KERN_DEBUG, wiphy, format, ##args)
 
index be3cad9c2e4c37b282e5c2d0e2ef5f05a79b7438..583526aad1d0ac6ac3681cf9cf56d4496a221c3f 100644 (file)
@@ -677,7 +677,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
                             unsigned char __user *data, int optlen);
 void ip_options_undo(struct ip_options *opt);
 void ip_forward_options(struct sk_buff *skb);
-int ip_options_rcv_srr(struct sk_buff *skb);
+int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
 
 /*
  *     Functions provided by ip_sockglue.c
index ac2ed8ec662bd97ebe0337085e78e5a61906d499..112dc18c658f15f79525cae64afa0ea38e2a1159 100644 (file)
@@ -6231,8 +6231,6 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @ac: AC number to return packets from.
  *
- * Should only be called between calls to ieee80211_txq_schedule_start()
- * and ieee80211_txq_schedule_end().
  * Returns the next txq if successful, %NULL if no queue is eligible. If a txq
  * is returned, it should be returned with ieee80211_return_txq() after the
  * driver has finished scheduling it.
@@ -6240,51 +6238,58 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac);
 
 /**
- * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
- *
- * @hw: pointer as obtained from ieee80211_alloc_hw()
- * @txq: pointer obtained from station or virtual interface
- *
- * Should only be called between calls to ieee80211_txq_schedule_start()
- * and ieee80211_txq_schedule_end().
- */
-void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
-
-/**
- * ieee80211_txq_schedule_start - acquire locks for safe scheduling of an AC
+ * ieee80211_txq_schedule_start - start new scheduling round for TXQs
  *
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @ac: AC number to acquire locks for
  *
- * Acquire locks needed to schedule TXQs from the given AC. Should be called
- * before ieee80211_next_txq() or ieee80211_return_txq().
+ * Should be called before ieee80211_next_txq() or ieee80211_return_txq().
+ * The driver must not call multiple TXQ scheduling rounds concurrently.
  */
-void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
-       __acquires(txq_lock);
+void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac);
+
+/* (deprecated) */
+static inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
+{
+}
+
+void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
+                             struct ieee80211_txq *txq, bool force);
 
 /**
- * ieee80211_txq_schedule_end - release locks for safe scheduling of an AC
+ * ieee80211_schedule_txq - schedule a TXQ for transmission
  *
  * @hw: pointer as obtained from ieee80211_alloc_hw()
- * @ac: AC number to acquire locks for
+ * @txq: pointer obtained from station or virtual interface
  *
- * Release locks previously acquired by ieee80211_txq_schedule_end().
+ * Schedules a TXQ for transmission if it is not already scheduled,
+ * even if mac80211 does not have any packets buffered.
+ *
+ * The driver may call this function if it has buffered packets for
+ * this TXQ internally.
  */
-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
-       __releases(txq_lock);
+static inline void
+ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+       __ieee80211_schedule_txq(hw, txq, true);
+}
 
 /**
- * ieee80211_schedule_txq - schedule a TXQ for transmission
+ * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
  *
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @txq: pointer obtained from station or virtual interface
+ * @force: schedule txq even if mac80211 does not have any buffered packets.
  *
- * Schedules a TXQ for transmission if it is not already scheduled. Takes a
- * lock, which means it must *not* be called between
- * ieee80211_txq_schedule_start() and ieee80211_txq_schedule_end()
+ * The driver may set force=true if it has buffered packets for this TXQ
+ * internally.
  */
-void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
-       __acquires(txq_lock) __releases(txq_lock);
+static inline void
+ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
+                    bool force)
+{
+       __ieee80211_schedule_txq(hw, txq, force);
+}
 
 /**
  * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit
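
Putting the reworked API together, a hedged sketch of a driver's per-AC scheduling round; example_hw_queue_frames() stands in for the driver's own dequeue-and-queue-to-hardware path and is an assumption, not part of the patch:

/* Hypothetical: pushes frames for this TXQ to hardware; returns true
 * if the driver still holds buffered frames for it afterwards. */
bool example_hw_queue_frames(struct ieee80211_txq *txq);

static void example_tx_schedule(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_txq *txq;

	ieee80211_txq_schedule_start(hw, ac);
	while ((txq = ieee80211_next_txq(hw, ac)) != NULL) {
		bool buffered = example_hw_queue_frames(txq);

		/*
		 * force=true keeps the TXQ scheduled even when mac80211
		 * itself has nothing buffered for it.
		 */
		ieee80211_return_txq(hw, txq, buffered);
	}
	ieee80211_txq_schedule_end(hw, ac);
}
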
index a68ced28d8f47e09c6f6beabc56a22a04ae3f163..12689ddfc24c44fe3297d1eda548811d8061670b 100644 (file)
@@ -59,6 +59,7 @@ struct net {
                                                 */
        spinlock_t              rules_mod_lock;
 
+       u32                     hash_mix;
        atomic64_t              cookie_gen;
 
        struct list_head        list;           /* list of network namespaces */
index 5ee7b30b4917244a0f74d28e93c05e1c3fc59a04..d2bc733a2ef1edf2ee7159457b1f33a676e74a98 100644 (file)
@@ -316,6 +316,8 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
                                 gfp_t flags);
 void nf_ct_tmpl_free(struct nf_conn *tmpl);
 
+u32 nf_ct_get_id(const struct nf_conn *ct);
+
 static inline void
 nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info)
 {
index 778087591983dd2a274ef9aa75d9b09caf78e8af..a49edfdf47e83ece9945d8978dc3e260a5d9d7e0 100644 (file)
@@ -75,6 +75,12 @@ bool nf_conntrack_invert_icmp_tuple(struct nf_conntrack_tuple *tuple,
 bool nf_conntrack_invert_icmpv6_tuple(struct nf_conntrack_tuple *tuple,
                                      const struct nf_conntrack_tuple *orig);
 
+int nf_conntrack_inet_error(struct nf_conn *tmpl, struct sk_buff *skb,
+                           unsigned int dataoff,
+                           const struct nf_hook_state *state,
+                           u8 l4proto,
+                           union nf_inet_addr *outer_daddr);
+
 int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
                              struct sk_buff *skb,
                              unsigned int dataoff,
index 16a842456189f2fc1a3363685b5dd4310a32b2b8..d9b665151f3d9e916f35620141542a5a145e6123 100644 (file)
@@ -2,16 +2,10 @@
 #ifndef __NET_NS_HASH_H__
 #define __NET_NS_HASH_H__
 
-#include <asm/cache.h>
-
-struct net;
+#include <net/net_namespace.h>
 
 static inline u32 net_hash_mix(const struct net *net)
 {
-#ifdef CONFIG_NET_NS
-       return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
-#else
-       return 0;
-#endif
+       return net->hash_mix;
 }
 #endif
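
The old net_hash_mix() derived its value from the struct net pointer, which both leaked kernel-address bits to hash users and collapsed to a constant without CONFIG_NET_NS; the new per-namespace hash_mix is pure entropy set when the namespace is created. A hedged sketch of a typical user, where example_secret is a hypothetical per-table seed:

#include <linux/jhash.h>
#include <linux/hash.h>
#include <net/netns/hash.h>

static u32 example_bucket(const struct net *net, u32 key, u32 example_secret,
			  unsigned int table_bits)
{
	/* Folding in net_hash_mix() keeps the bucket layout
	 * unpredictable and distinct between network namespaces. */
	return hash_32(jhash_1word(key, example_secret) ^ net_hash_mix(net),
		       table_bits);
}
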
index 5a0714ff500fd09bd288360a83dad57952e5efaf..80f15b1c1a489a71479845ae0d077875b1a52f66 100644 (file)
@@ -266,7 +266,7 @@ void nr_stop_idletimer(struct sock *);
 int nr_t1timer_running(struct sock *);
 
 /* sysctl_net_netrom.c */
-void nr_register_sysctl(void);
+int nr_register_sysctl(void);
 void nr_unregister_sysctl(void);
 
 #endif
index 87499b6b35d6dd75ea3058449c5db484e2aca611..df5c69db68afc334d0ac51c031ca6120d8b7e6e9 100644 (file)
@@ -166,7 +166,7 @@ struct nci_conn_info {
  * According to specification 102 622 chapter 4.4 Pipes,
  * the pipe identifier is 7 bits long.
  */
-#define NCI_HCI_MAX_PIPES          127
+#define NCI_HCI_MAX_PIPES          128
 
 struct nci_hci_gate {
        u8 gate;
index 31284c078d06b6bb426fec08e4776a1e67ada41b..a2b38b3deeca2096d01d536d9a492047827ff994 100644 (file)
@@ -378,6 +378,7 @@ struct tcf_chain {
        bool flushing;
        const struct tcf_proto_ops *tmplt_ops;
        void *tmplt_priv;
+       struct rcu_head rcu;
 };
 
 struct tcf_block {
@@ -922,6 +923,41 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
        sch->qstats.overlimits++;
 }
 
+static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
+{
+       __u32 qlen = qdisc_qlen_sum(sch);
+
+       return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
+}
+
+static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch,  __u32 *qlen,
+                                            __u32 *backlog)
+{
+       struct gnet_stats_queue qstats = { 0 };
+       __u32 len = qdisc_qlen_sum(sch);
+
+       __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
+       *qlen = qstats.qlen;
+       *backlog = qstats.backlog;
+}
+
+static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
+{
+       __u32 qlen, backlog;
+
+       qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+       qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
+static inline void qdisc_purge_queue(struct Qdisc *sch)
+{
+       __u32 qlen, backlog;
+
+       qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+       qdisc_reset(sch);
+       qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
 static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
 {
        qh->head = NULL;
@@ -1105,13 +1141,8 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
        sch_tree_lock(sch);
        old = *pold;
        *pold = new;
-       if (old != NULL) {
-               unsigned int qlen = old->q.qlen;
-               unsigned int backlog = old->qstats.backlog;
-
-               qdisc_reset(old);
-               qdisc_tree_reduce_backlog(old, qlen, backlog);
-       }
+       if (old != NULL)
+               qdisc_tree_flush_backlog(old);
        sch_tree_unlock(sch);
 
        return old;
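
The ordering inside qdisc_purge_queue() is the point of the helper: qdisc_reset() zeroes qlen and backlog, so the counters must be snapshotted first for qdisc_tree_reduce_backlog() to credit the ancestors correctly. A hedged sketch of a classful qdisc using it in a class-delete path; struct example_class is hypothetical:

struct example_class {
	struct Qdisc *qdisc;	/* hypothetical per-class child qdisc */
};

static int example_delete_class(struct Qdisc *sch, struct example_class *cl)
{
	sch_tree_lock(sch);
	/* Reads the child's qlen/backlog before qdisc_reset() zeroes
	 * them, then decrements the ancestors by the same amounts. */
	qdisc_purge_queue(cl->qdisc);
	sch_tree_unlock(sch);
	return 0;
}
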
index 32ee65a30aff1146dcafcc533e73833e190cb887..1c6e6c0766ca09b771d865883c7c4daf390215c6 100644 (file)
@@ -61,7 +61,7 @@ static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
 static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
                                        unsigned int offset)
 {
-       struct sctphdr *sh = sctp_hdr(skb);
+       struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
        const struct skb_checksum_ops ops = {
                .update  = sctp_csum_update,
                .combine = sctp_csum_combine,
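
The fix above matters because sctp_hdr(skb) expands to skb_transport_header(skb), which is only valid once the transport header offset has been set on the skb; callers already pass the SCTP header's byte offset, so deriving the header from skb->data + offset also works for tunneled or not-yet-parsed packets. A hedged call-site sketch, with sctp_hdroff as an illustrative name:

	/* Offset of the SCTP header within the packet, known to the
	 * caller even before the skb's transport header is set up. */
	__le32 csum = sctp_compute_cksum(skb, sctp_hdroff);
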
index 6640f84fe5368f868e49bfb9ea2c3f94639b4e80..6d5beac29bc1166e18b7e6d5c8b66073b86318b5 100644 (file)
@@ -105,7 +105,6 @@ enum sctp_verb {
        SCTP_CMD_T1_RETRAN,      /* Mark for retransmission after T1 timeout  */
        SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
        SCTP_CMD_SEND_MSG,       /* Send the whole user message */
-       SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
        SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
        SCTP_CMD_SET_ASOC,       /* Restore association context */
        SCTP_CMD_LAST
index 328cb7cb7b0bb93f1eb3ec2708c43b29b56efb15..341f8bafa0cf585fc72e5819054f1b2f15a8e338 100644 (file)
@@ -710,6 +710,12 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
                hlist_add_head_rcu(&sk->sk_node, list);
 }
 
+static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
+{
+       sock_hold(sk);
+       hlist_add_tail_rcu(&sk->sk_node, list);
+}
+
 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
        hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
@@ -2078,12 +2084,6 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
  * @p:              poll_table
  *
  * See the comments in the wq_has_sleeper function.
- *
- * Do not derive sock from filp->private_data here. An SMC socket establishes
- * an internal TCP socket that is used in the fallback case. All socket
- * operations on the SMC socket are then forwarded to the TCP socket. In case of
- * poll, the filp->private_data pointer references the SMC socket because the
- * TCP socket has no file assigned.
  */
 static inline void sock_poll_wait(struct file *filp, struct socket *sock,
                                  poll_table *p)
index ee8d005f56fcddb0e3dc5d68b10a5dd1efdeb8f7..eb8f01c819e636aca55019cbe791b0e40b612baf 100644 (file)
@@ -56,7 +56,7 @@ static inline bool is_tcf_gact_goto_chain(const struct tc_action *a)
 
 static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a)
 {
-       return a->goto_chain->index;
+       return READ_ONCE(a->tcfa_action) & TC_ACT_EXT_VAL_MASK;
 }
 
 #endif /* __NET_TC_GACT_H */
index a5a938583295c0789df287c737b7a1c87556c9f1..5934246b2c6f4bafbe318fdddfacb328a2b9bf5c 100644 (file)
@@ -307,6 +307,7 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int tls_device_sendpage(struct sock *sk, struct page *page,
                        int offset, size_t size, int flags);
 void tls_device_sk_destruct(struct sock *sk);
+void tls_device_free_resources_tx(struct sock *sk);
 void tls_device_init(void);
 void tls_device_cleanup(void);
 int tls_tx_records(struct sock *sk, int flags);
@@ -330,6 +331,7 @@ int tls_push_sg(struct sock *sk, struct tls_context *ctx,
                int flags);
 int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
                            int flags);
+bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
 
 static inline struct tls_msg *tls_msg(struct sk_buff *skb)
 {
@@ -379,7 +381,7 @@ tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
 static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
 {
 #ifdef CONFIG_SOCK_VALIDATE_XMIT
-       return sk_fullsock(sk) &
+       return sk_fullsock(sk) &&
               (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
               &tls_validate_xmit_skb);
 #else
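
The one-character tls.h fix above is worth dwelling on: both operands are 0 or 1, so bitwise & computes the same value as &&, but & evaluates both sides unconditionally, and the acquire-load of sk->sk_validate_xmit_skb therefore ran even for request/timewait minisockets where sk_fullsock() is false and that field is not valid. A standalone userspace demonstration of the short-circuit difference:

#include <stdbool.h>
#include <stdio.h>

static bool touched;

static bool load_field(void)
{
	touched = true;	/* side effect standing in for the field load */
	return true;
}

int main(void)
{
	bool fullsock = false;

	touched = false;
	(void)(fullsock & load_field());	/* '&': both sides evaluated */
	printf("bitwise : touched=%d\n", touched);	/* prints 1 */

	touched = false;
	(void)(fullsock && load_field());	/* '&&': short-circuits */
	printf("logical : touched=%d\n", touched);	/* prints 0 */

	return 0;
}
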
index 61cf7dbb678298559ccf9ea432adef04507bc597..d074b6d60f8af77a355b6755ef3d096237ff55bc 100644 (file)
@@ -36,7 +36,6 @@ struct xdp_umem {
        u32 headroom;
        u32 chunk_size_nohr;
        struct user_struct *user;
-       struct pid *pid;
        unsigned long address;
        refcount_t users;
        struct work_struct work;
index bbcf0b650b81e5a455db95a1967a768d90a13fda..99f722c4d8044dca73f13f3a27376a20cda6181f 100644 (file)
@@ -295,7 +295,8 @@ struct xfrm_replay {
 };
 
 struct xfrm_if_cb {
-       struct xfrm_if  *(*decode_session)(struct sk_buff *skb);
+       struct xfrm_if  *(*decode_session)(struct sk_buff *skb,
+                                          unsigned short family);
 };
 
 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
@@ -1404,6 +1405,23 @@ static inline int xfrm_state_kern(const struct xfrm_state *x)
        return atomic_read(&x->tunnel_users);
 }
 
+static inline bool xfrm_id_proto_valid(u8 proto)
+{
+       switch (proto) {
+       case IPPROTO_AH:
+       case IPPROTO_ESP:
+       case IPPROTO_COMP:
+#if IS_ENABLED(CONFIG_IPV6)
+       case IPPROTO_ROUTING:
+       case IPPROTO_DSTOPTS:
+#endif
+               return true;
+       default:
+               return false;
+       }
+}
+
+/* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
 {
        return (!userproto || proto == userproto ||
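
A hedged usage sketch for xfrm_id_proto_valid(): rejecting userspace-supplied SA parameters whose protocol the stack cannot actually key states on. The surrounding validation routine is an assumption about the kind of caller, not shown in this hunk:

	/* e.g. early in validating a new SA received over netlink */
	if (!xfrm_id_proto_valid(p->id.proto))
		return -EINVAL;
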
index eb7db605955b852a469d11b62a93817f7726cf88..482b4ea87c3c4bd9fac786fa7165b30123174ea2 100644 (file)
@@ -802,8 +802,13 @@ struct snd_soc_component_driver {
        int probe_order;
        int remove_order;
 
-       /* signal if the module handling the component cannot be removed */
-       unsigned int ignore_module_refcount:1;
+       /*
+        * signal if the module handling the component should not be removed
+        * if a pcm is open. Setting this would prevent the module
+        * refcount being incremented in probe() but allow it to be incremented
+        * when a pcm is opened and decremented when it is closed.
+        */
+       unsigned int module_get_upon_open:1;
 
        /* bits */
        unsigned int idle_bias_on:1;
@@ -1083,6 +1088,8 @@ struct snd_soc_card {
        struct mutex mutex;
        struct mutex dapm_mutex;
 
+       spinlock_t dpcm_lock;
+
        bool instantiated;
        bool topology_shortname_created;
 
index 44a3259ed4a5bde50e231a982624b0893e5bb0eb..b6e0cbc2c71f16df87380860e88544acb6d80a40 100644 (file)
@@ -28,7 +28,7 @@ TRACE_EVENT_FN(sys_enter,
 
        TP_fast_assign(
                __entry->id     = id;
-               syscall_get_arguments(current, regs, 0, 6, __entry->args);
+               syscall_get_arguments(current, regs, __entry->args);
        ),
 
        TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
index 5f24b50c9e88eb72dd2a396d8af5dc193e6bd782..059dc2bedaf6e895d30fab48c637e87471049920 100644 (file)
@@ -7,5 +7,7 @@ no-export-headers += kvm.h
 endif
 
 ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h),)
+ifeq ($(wildcard $(objtree)/arch/$(SRCARCH)/include/generated/uapi/asm/kvm_para.h),)
 no-export-headers += kvm_para.h
 endif
+endif
index 3c38ac9a92a7c4b18cbb4ac49ac60bf887b03d20..929c8e537a14a517c0a3c7ca5b6b15353d622c30 100644 (file)
@@ -502,16 +502,6 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
- *     Description
- *             Push an element *value* in *map*. *flags* is one of:
- *
- *             **BPF_EXIST**
- *             If the queue/stack is full, the oldest element is removed to
- *             make room for this.
- *     Return
- *             0 on success, or a negative error in case of failure.
- *
  * int bpf_probe_read(void *dst, u32 size, const void *src)
  *     Description
  *             For tracing programs, safely attempt to read *size* bytes from
@@ -1435,14 +1425,14 @@ union bpf_attr {
  * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
  *     Description
  *             Equivalent to bpf_get_socket_cookie() helper that accepts
- *             *skb*, but gets socket from **struct bpf_sock_addr** contex.
+ *             *skb*, but gets socket from **struct bpf_sock_addr** context.
  *     Return
  *             An 8-byte long non-decreasing number.
  *
  * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
  *     Description
  *             Equivalent to bpf_get_socket_cookie() helper that accepts
- *             *skb*, but gets socket from **struct bpf_sock_ops** contex.
+ *             *skb*, but gets socket from **struct bpf_sock_ops** context.
  *     Return
  *             An 8-byte long non-decreasing number.
  *
@@ -2098,52 +2088,52 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
+ * int bpf_rc_repeat(void *ctx)
  *     Description
  *             This helper is used in programs implementing IR decoding, to
- *             report a successfully decoded key press with *scancode*,
- *             *toggle* value in the given *protocol*. The scancode will be
- *             translated to a keycode using the rc keymap, and reported as
- *             an input key down event. After a period a key up event is
- *             generated. This period can be extended by calling either
- *             **bpf_rc_keydown**\ () again with the same values, or calling
- *             **bpf_rc_repeat**\ ().
+ *             report a successfully decoded repeat key message. This delays
+ *             the generation of a key up event for a previously generated
+ *             key down event.
  *
- *             Some protocols include a toggle bit, in case the button was
- *             released and pressed again between consecutive scancodes.
+ *             Some IR protocols like NEC have a special IR message for
+ *             repeating last button, for when a button is held down.
  *
  *             The *ctx* should point to the lirc sample as passed into
  *             the program.
  *
- *             The *protocol* is the decoded protocol number (see
- *             **enum rc_proto** for some predefined values).
- *
  *             This helper is only available if the kernel was compiled with
  *             the **CONFIG_BPF_LIRC_MODE2** configuration option set to
  *             "**y**".
  *     Return
  *             0
  *
- * int bpf_rc_repeat(void *ctx)
+ * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
  *     Description
  *             This helper is used in programs implementing IR decoding, to
- *             report a successfully decoded repeat key message. This delays
- *             the generation of a key up event for previously generated
- *             key down event.
+ *             report a successfully decoded key press with *scancode*,
+ *             *toggle* value in the given *protocol*. The scancode will be
+ *             translated to a keycode using the rc keymap, and reported as
+ *             an input key down event. After a period a key up event is
+ *             generated. This period can be extended by calling either
+ *             **bpf_rc_keydown**\ () again with the same values, or calling
+ *             **bpf_rc_repeat**\ ().
  *
- *             Some IR protocols like NEC have a special IR message for
- *             repeating last button, for when a button is held down.
+ *             Some protocols include a toggle bit, in case the button was
+ *             released and pressed again between consecutive scancodes.
  *
  *             The *ctx* should point to the lirc sample as passed into
  *             the program.
  *
+ *             The *protocol* is the decoded protocol number (see
+ *             **enum rc_proto** for some predefined values).
+ *
  *             This helper is only available if the kernel was compiled with
  *             the **CONFIG_BPF_LIRC_MODE2** configuration option set to
  *             "**y**".
  *     Return
  *             0
  *
- * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb)
+ * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
  *     Description
  *             Return the cgroup v2 id of the socket associated with the *skb*.
  *             This is roughly similar to the **bpf_get_cgroup_classid**\ ()
@@ -2159,30 +2149,12 @@ union bpf_attr {
  *     Return
  *             The id is returned or 0 in case the id could not be retrieved.
  *
- * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
- *     Description
- *             Return id of cgroup v2 that is ancestor of cgroup associated
- *             with the *skb* at the *ancestor_level*.  The root cgroup is at
- *             *ancestor_level* zero and each step down the hierarchy
- *             increments the level. If *ancestor_level* == level of cgroup
- *             associated with *skb*, then return value will be same as that
- *             of **bpf_skb_cgroup_id**\ ().
- *
- *             The helper is useful to implement policies based on cgroups
- *             that are upper in hierarchy than immediate cgroup associated
- *             with *skb*.
- *
- *             The format of returned id and helper limitations are same as in
- *             **bpf_skb_cgroup_id**\ ().
- *     Return
- *             The id is returned or 0 in case the id could not be retrieved.
- *
  * u64 bpf_get_current_cgroup_id(void)
  *     Return
  *             A 64-bit integer containing the current cgroup id based
  *             on the cgroup within which the current task is running.
  *
- * voidget_local_storage(void *map, u64 flags)
+ * void *bpf_get_local_storage(void *map, u64 flags)
  *     Description
  *             Get the pointer to the local storage area.
  *             The type and the size of the local storage is defined
@@ -2209,6 +2181,24 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
+ * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
+ *     Description
+ *             Return id of cgroup v2 that is ancestor of cgroup associated
+ *             with the *skb* at the *ancestor_level*.  The root cgroup is at
+ *             *ancestor_level* zero and each step down the hierarchy
+ *             increments the level. If *ancestor_level* == level of cgroup
+ *             associated with *skb*, then return value will be same as that
+ *             of **bpf_skb_cgroup_id**\ ().
+ *
+ *             The helper is useful to implement policies based on cgroups
+ *             that are upper in hierarchy than immediate cgroup associated
+ *             with *skb*.
+ *
+ *             The format of returned id and helper limitations are same as in
+ *             **bpf_skb_cgroup_id**\ ().
+ *     Return
+ *             The id is returned or 0 in case the id could not be retrieved.
+ *
  * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
  *     Description
  *             Look for TCP socket matching *tuple*, optionally in a child
@@ -2289,6 +2279,16 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
+ * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
+ *     Description
+ *             Push an element *value* in *map*. *flags* is one of:
+ *
+ *             **BPF_EXIST**
+ *                     If the queue/stack is full, the oldest element is
+ *                     removed to make room for this.
+ *     Return
+ *             0 on success, or a negative error in case of failure.
+ *
  * int bpf_map_pop_elem(struct bpf_map *map, void *value)
  *     Description
  *             Pop an element from *map*.
@@ -2343,29 +2343,94 @@ union bpf_attr {
  *     Return
  *             0
  *
+ * int bpf_spin_lock(struct bpf_spin_lock *lock)
+ *     Description
+ *             Acquire a spinlock represented by the pointer *lock*, which is
+ *             stored as part of a value of a map. Taking the lock makes it
+ *             safe to update the rest of the fields in that value. The
+ *             spinlock can (and must) later be released with a call to
+ *             **bpf_spin_unlock**\ (\ *lock*\ ).
+ *
+ *             Spinlocks in BPF programs come with a number of restrictions
+ *             and constraints:
+ *
+ *             * **bpf_spin_lock** objects are only allowed inside maps of
+ *               types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
+ *               list could be extended in the future).
+ *             * BTF description of the map is mandatory.
+ *             * The BPF program can take ONE lock at a time, since taking two
+ *               or more could cause deadlocks.
+ *             * Only one **struct bpf_spin_lock** is allowed per map element.
+ *             * When the lock is taken, calls (either BPF to BPF or helpers)
+ *               are not allowed.
+ *             * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
+ *               allowed inside a spinlock-ed region.
+ *             * The BPF program MUST call **bpf_spin_unlock**\ () to release
+ *               the lock, on all execution paths, before it returns.
+ *             * The BPF program can access **struct bpf_spin_lock** only via
+ *               the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
+ *               helpers. Loading or storing data into the **struct
+ *               bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
+ *             * To use the **bpf_spin_lock**\ () helper, the BTF description
+ *               of the map value must be a struct and have **struct
+ *               bpf_spin_lock** *anyname*\ **;** field at the top level.
+ *               Nested lock inside another struct is not allowed.
+ *             * The **struct bpf_spin_lock** *lock* field in a map value must
+ *               be aligned on a multiple of 4 bytes in that value.
+ *             * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
+ *               the **bpf_spin_lock** field to user space.
+ *             * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
+ *               a BPF program, do not update the **bpf_spin_lock** field.
+ *             * **bpf_spin_lock** cannot be on the stack or inside a
+ *               networking packet (it can only be inside of a map value).
+ *             * **bpf_spin_lock** is available to root only.
+ *             * Tracing programs and socket filter programs cannot use
+ *               **bpf_spin_lock**\ () due to insufficient preemption checks
+ *               (but this may change in the future).
+ *             * **bpf_spin_lock** is not allowed in inner maps of map-in-map.
+ *     Return
+ *             0
+ *
+ * int bpf_spin_unlock(struct bpf_spin_lock *lock)
+ *     Description
+ *             Release the *lock* previously locked by a call to
+ *             **bpf_spin_lock**\ (\ *lock*\ ).
+ *     Return
+ *             0
+ *
  * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
  *     Description
  *             This helper gets a **struct bpf_sock** pointer such
- *             that all the fields in bpf_sock can be accessed.
+ *             that all the fields in this **bpf_sock** can be accessed.
  *     Return
- *             A **struct bpf_sock** pointer on success, or NULL in
+ *             A **struct bpf_sock** pointer on success, or **NULL** in
  *             case of failure.
  *
  * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
  *     Description
  *             This helper gets a **struct bpf_tcp_sock** pointer from a
  *             **struct bpf_sock** pointer.
- *
  *     Return
- *             A **struct bpf_tcp_sock** pointer on success, or NULL in
+ *             A **struct bpf_tcp_sock** pointer on success, or **NULL** in
  *             case of failure.
  *
  * int bpf_skb_ecn_set_ce(struct sk_buff *skb)
- *     Description
- *             Sets ECN of IP header to ce (congestion encountered) if
- *             current value is ect (ECN capable). Works with IPv6 and IPv4.
- *     Return
- *             1 if set, 0 if not set.
+ *     Description
+ *             Set ECN (Explicit Congestion Notification) field of IP header
+ *             to **CE** (Congestion Encountered) if current value is **ECT**
+ *             (ECN Capable Transport). Otherwise, do nothing. Works with IPv6
+ *             and IPv4.
+ *     Return
+ *             1 if the **CE** flag is set (either by the current helper call
+ *             or because it was already present), 0 if it is not set.
+ *
+ * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
+ *     Description
+ *             Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
+ *             **bpf_sk_release**\ () is unnecessary and not allowed.
+ *     Return
+ *             A **struct bpf_sock** pointer on success, or **NULL** in
+ *             case of failure.
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
@@ -2465,7 +2530,8 @@ union bpf_attr {
        FN(spin_unlock),                \
        FN(sk_fullsock),                \
        FN(tcp_sock),                   \
-       FN(skb_ecn_set_ce),
+       FN(skb_ecn_set_ce),             \
+       FN(get_listener_sock),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
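
The bpf_spin_lock documentation above lists many constraints; a minimal BPF-C sketch that satisfies them (one lock per BTF-described map value, at the top level, no calls while held, released on every path). The map, section, and function names are illustrative, and the BTF-defined map syntax from recent libbpf is assumed:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct counter_val {
	struct bpf_spin_lock lock;	/* top-level field, 4-byte aligned */
	__u64 packets;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct counter_val);
} counters SEC(".maps");

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	__u32 key = 0;
	struct counter_val *val;

	val = bpf_map_lookup_elem(&counters, &key);	/* call before locking */
	if (!val)
		return 1;

	bpf_spin_lock(&val->lock);	/* no helper calls while held */
	val->packets++;
	bpf_spin_unlock(&val->lock);	/* released on every path */

	return 1;	/* 1 = allow the packet for cgroup_skb programs */
}

char _license[] SEC("license") = "GPL";
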
index 3652b239dad1d7c556ff28a7a9a87129aa9b89d3..d473e5ed044c71df4e734f91c96f828952834dc3 100644 (file)
@@ -1591,7 +1591,7 @@ enum ethtool_link_mode_bit_indices {
 
 static inline int ethtool_validate_speed(__u32 speed)
 {
-       return speed <= INT_MAX || speed == SPEED_UNKNOWN;
+       return speed <= INT_MAX || speed == (__u32)SPEED_UNKNOWN;
 }
 
 /* Duplex, half or full. */
index 7f14d4a66c28c1c13d1388c6dacfcff30711edab..64cee116928ebd92d97acec08c273e02c89958ab 100644 (file)
 #define KEY_TITLE              0x171
 #define KEY_SUBTITLE           0x172
 #define KEY_ANGLE              0x173
-#define KEY_ZOOM               0x174
+#define KEY_FULL_SCREEN                0x174   /* AC View Toggle */
+#define KEY_ZOOM               KEY_FULL_SCREEN
 #define KEY_MODE               0x175
 #define KEY_KEYBOARD           0x176
-#define KEY_SCREEN             0x177
+#define KEY_ASPECT_RATIO       0x177   /* HUTRR37: Aspect */
+#define KEY_SCREEN             KEY_ASPECT_RATIO
 #define KEY_PC                 0x178   /* Media Select Computer */
 #define KEY_TV                 0x179   /* Media Select TV */
 #define KEY_TV2                        0x17a   /* Media Select Cable */
index 0e68024f36c712dcb295f6214914ea5dc3371a25..26f39816af14c149ab1d8be5842112f4bf36c18c 100644 (file)
@@ -102,6 +102,66 @@ enum vmmdev_request_type {
 #define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL32
 #endif
 
+/* vmmdev_request_header.requestor defines */
+
+/* Requestor user not given. */
+#define VMMDEV_REQUESTOR_USR_NOT_GIVEN                      0x00000000
+/* The kernel driver (vboxguest) is the requestor. */
+#define VMMDEV_REQUESTOR_USR_DRV                            0x00000001
+/* Some other kernel driver is the requestor. */
+#define VMMDEV_REQUESTOR_USR_DRV_OTHER                      0x00000002
+/* The root or an admin user is the requestor. */
+#define VMMDEV_REQUESTOR_USR_ROOT                           0x00000003
+/* Regular joe user is making the request. */
+#define VMMDEV_REQUESTOR_USR_USER                           0x00000006
+/* User classification mask. */
+#define VMMDEV_REQUESTOR_USR_MASK                           0x00000007
+
+/* Kernel mode request. Note this is 0, check for !USERMODE instead. */
+#define VMMDEV_REQUESTOR_KERNEL                             0x00000000
+/* User mode request. */
+#define VMMDEV_REQUESTOR_USERMODE                           0x00000008
+/* User or kernel mode classification mask. */
+#define VMMDEV_REQUESTOR_MODE_MASK                          0x00000008
+
+/* Don't know the physical console association of the requestor. */
+#define VMMDEV_REQUESTOR_CON_DONT_KNOW                      0x00000000
+/*
+ * The request originates with a process that is NOT associated with the
+ * physical console.
+ */
+#define VMMDEV_REQUESTOR_CON_NO                             0x00000010
+/* Requestor process is associated with the physical console. */
+#define VMMDEV_REQUESTOR_CON_YES                            0x00000020
+/* Console classification mask. */
+#define VMMDEV_REQUESTOR_CON_MASK                           0x00000030
+
+/* Requestor is member of special VirtualBox user group. */
+#define VMMDEV_REQUESTOR_GRP_VBOX                           0x00000080
+
+/* Note: trust level is for windows guests only, linux always uses not-given */
+/* Requestor trust level: Unspecified */
+#define VMMDEV_REQUESTOR_TRUST_NOT_GIVEN                    0x00000000
+/* Requestor trust level: Untrusted (SID S-1-16-0) */
+#define VMMDEV_REQUESTOR_TRUST_UNTRUSTED                    0x00001000
+/* Requestor trust level: Low (SID S-1-16-4096) */
+#define VMMDEV_REQUESTOR_TRUST_LOW                          0x00002000
+/* Requestor trust level: Medium (SID S-1-16-8192) */
+#define VMMDEV_REQUESTOR_TRUST_MEDIUM                       0x00003000
+/* Requestor trust level: Medium plus (SID S-1-16-8448) */
+#define VMMDEV_REQUESTOR_TRUST_MEDIUM_PLUS                  0x00004000
+/* Requestor trust level: High (SID S-1-16-12288) */
+#define VMMDEV_REQUESTOR_TRUST_HIGH                         0x00005000
+/* Requestor trust level: System (SID S-1-16-16384) */
+#define VMMDEV_REQUESTOR_TRUST_SYSTEM                       0x00006000
+/* Requestor trust level >= Protected (SID S-1-16-20480, S-1-16-28672) */
+#define VMMDEV_REQUESTOR_TRUST_PROTECTED                    0x00007000
+/* Requestor trust level mask */
+#define VMMDEV_REQUESTOR_TRUST_MASK                         0x00007000
+
+/* Requestor is using the less trusted user device node (/dev/vboxuser) */
+#define VMMDEV_REQUESTOR_USER_DEVICE                        0x00008000
+
 /** HGCM service location types. */
 enum vmmdev_hgcm_service_location_type {
        VMMDEV_HGCM_LOC_INVALID    = 0,
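
The requestor word is a set of ORed classification fields; a hedged sketch composing and decomposing one, using only the defines above (the particular combination is illustrative):

	u32 requestor = VMMDEV_REQUESTOR_USR_USER |        /* regular user */
			VMMDEV_REQUESTOR_USERMODE |        /* from user mode */
			VMMDEV_REQUESTOR_CON_DONT_KNOW |   /* console unknown */
			VMMDEV_REQUESTOR_TRUST_NOT_GIVEN | /* linux: not given */
			VMMDEV_REQUESTOR_USER_DEVICE;      /* via /dev/vboxuser */

	/* Each class is recovered with a mask-and-compare: */
	bool is_regular_user = (requestor & VMMDEV_REQUESTOR_USR_MASK) ==
			       VMMDEV_REQUESTOR_USR_USER;
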
index 87b3198f4b5d7aa02c330f59178d8cc74dba5f6c..f4d4010b7e3e54f2bfc1d64a708c8454b5899b68 100644 (file)
@@ -238,6 +238,7 @@ enum mlx5_ib_query_dev_resp_flags {
        MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
        MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD  = 1 << 1,
        MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE = 1 << 2,
+       MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT = 1 << 3,
 };
 
 enum mlx5_ib_tunnel_offloads {
index 404d4b9ffe7644553a1b59fba043b151d935a2e9..df1153cea0b7ee2a27e19682837f81922fef353e 100644 (file)
@@ -32,6 +32,7 @@
 
 #ifndef __KERNEL__
 #include <stdlib.h>
+#include <time.h>
 #endif
 
 /*
index 598e278b46f743d777e6f9375ac1c932896ef99c..7d4025d665eb95ee439ddb4e5e564aff8fa09c24 100644 (file)
@@ -582,6 +582,8 @@ asmlinkage __visible void __init start_kernel(void)
        page_alloc_init();
 
        pr_notice("Kernel command line: %s\n", boot_command_line);
+       /* parameters may set static keys */
+       jump_label_init();
        parse_early_param();
        after_dashes = parse_args("Booting kernel",
                                  static_command_line, __start___param,
@@ -591,8 +593,6 @@ asmlinkage __visible void __init start_kernel(void)
                parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
                           NULL, set_init_arg);
 
-       jump_label_init();
-
        /*
         * These use large bootmem allocations and must precede
         * kmem_cache_init()
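
The reorder above exists because an early parameter handler may flip a static key, and static_branch_enable() is only safe after jump_label_init() has run. A hedged sketch of such a handler; the key and parameter names are illustrative:

#include <linux/init.h>
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(example_fast_path);

static int __init example_fastpath_setup(char *str)
{
	/* Safe only once jump_label_init() has initialized the keys. */
	static_branch_enable(&example_fast_path);
	return 0;
}
early_param("example_fastpath", example_fastpath_setup);
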
index fbba478ae52294f5306818deb15a9259b0132d53..e335953fa70407c39b18243f9aad7d1634e9b68a 100644 (file)
@@ -229,7 +229,7 @@ config MUTEX_SPIN_ON_OWNER
 
 config RWSEM_SPIN_ON_OWNER
        def_bool y
-       depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
+       depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW
 
 config LOCK_SPIN_ON_OWNER
        def_bool y
index 6c57e78817dada87e0db328c22349018f6e8b2e6..62471e75a2b0a62747ec77f8c8f3bf8af6f260eb 100644 (file)
@@ -30,6 +30,7 @@ KCOV_INSTRUMENT_extable.o := n
 # Don't self-instrument.
 KCOV_INSTRUMENT_kcov.o := n
 KASAN_SANITIZE_kcov.o := n
+CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 
 # cond_syscall is currently not LTO compatible
 CFLAGS_sys_ni.o = $(DISABLE_LTO)
index 1323360d90e375758825ff90c597b663c9d3c4d7..a563c8fdad0d21d6f1d2778bde0657aaa3d21ba6 100644 (file)
@@ -48,19 +48,14 @@ static void backtrace_test_irq(void)
 #ifdef CONFIG_STACKTRACE
 static void backtrace_test_saved(void)
 {
-       struct stack_trace trace;
        unsigned long entries[8];
+       unsigned int nr_entries;
 
        pr_info("Testing a saved backtrace.\n");
        pr_info("The following trace is a kernel self test and not a bug!\n");
 
-       trace.nr_entries = 0;
-       trace.max_entries = ARRAY_SIZE(entries);
-       trace.entries = entries;
-       trace.skip = 0;
-
-       save_stack_trace(&trace);
-       print_stack_trace(&trace, 0);
+       nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
+       stack_trace_print(entries, nr_entries, 0);
 }
 #else
 static void backtrace_test_saved(void)
index 8974b3755670e37b0540f3d48ee0b29da951a341..3c18260403dde1df951448c600b6ef9ac61f5635 100644 (file)
@@ -162,10 +162,14 @@ static void cpu_map_kthread_stop(struct work_struct *work)
 static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
                                         struct xdp_frame *xdpf)
 {
+       unsigned int hard_start_headroom;
        unsigned int frame_size;
        void *pkt_data_start;
        struct sk_buff *skb;
 
+       /* Part of the headroom was reserved for xdpf */
+       hard_start_headroom = sizeof(struct xdp_frame) +  xdpf->headroom;
+
        /* build_skb need to place skb_shared_info after SKB end, and
         * also want to know the memory "truesize".  Thus, need to
         * know the memory frame size backing xdp_buff.
@@ -183,15 +187,15 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
         * is not at a fixed memory location, with mixed length
         * packets, which is bad for cache-line hotness.
         */
-       frame_size = SKB_DATA_ALIGN(xdpf->len + xdpf->headroom) +
+       frame_size = SKB_DATA_ALIGN(xdpf->len + hard_start_headroom) +
                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-       pkt_data_start = xdpf->data - xdpf->headroom;
+       pkt_data_start = xdpf->data - hard_start_headroom;
        skb = build_skb(pkt_data_start, frame_size);
        if (!skb)
                return NULL;
 
-       skb_reserve(skb, xdpf->headroom);
+       skb_reserve(skb, hard_start_headroom);
        __skb_put(skb, xdpf->len);
        if (xdpf->metasize)
                skb_metadata_set(skb, xdpf->metasize);
@@ -205,6 +209,9 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
         * - RX ring dev queue index    (skb_record_rx_queue)
         */
 
+       /* Allow SKB to reuse area used by xdp_frame */
+       xdp_scrub_frame(xdpf);
+
        return skb;
 }
 
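The arithmetic behind the fix, as a worked example with illustrative numbers (a 32-byte struct xdp_frame is an assumption; the real size depends on the configuration):

/*
 * struct xdp_frame lives at the very start of the frame's headroom,
 * so the distance from the buffer start to xdpf->data is
 * sizeof(struct xdp_frame) + xdpf->headroom, not xdpf->headroom alone.
 * With sizeof(struct xdp_frame) == 32 and xdpf->headroom == 224:
 *
 *   hard_start_headroom = 32 + 224 = 256
 *   pkt_data_start      = xdpf->data - 256      (true buffer start)
 *   frame_size          = SKB_DATA_ALIGN(xdpf->len + 256)
 *                       + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *
 * Using xdpf->headroom alone undersized frame_size and pointed
 * pkt_data_start into the buffer, 32 bytes past its real start.
 */
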
index 2ada5e21dfa62175d6cf9667ed4636e4c4ada659..4a8f390a2b821db8cff26f9716952b36013ab152 100644 (file)
@@ -554,19 +554,6 @@ struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type typ
 }
 EXPORT_SYMBOL(bpf_prog_get_type_path);
 
-static void bpf_evict_inode(struct inode *inode)
-{
-       enum bpf_type type;
-
-       truncate_inode_pages_final(&inode->i_data);
-       clear_inode(inode);
-
-       if (S_ISLNK(inode->i_mode))
-               kfree(inode->i_link);
-       if (!bpf_inode_type(inode, &type))
-               bpf_any_put(inode->i_private, type);
-}
-
 /*
  * Display the mount options in /proc/mounts.
  */
@@ -579,11 +566,28 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
        return 0;
 }
 
+static void bpf_destroy_inode_deferred(struct rcu_head *head)
+{
+       struct inode *inode = container_of(head, struct inode, i_rcu);
+       enum bpf_type type;
+
+       if (S_ISLNK(inode->i_mode))
+               kfree(inode->i_link);
+       if (!bpf_inode_type(inode, &type))
+               bpf_any_put(inode->i_private, type);
+       free_inode_nonrcu(inode);
+}
+
+static void bpf_destroy_inode(struct inode *inode)
+{
+       call_rcu(&inode->i_rcu, bpf_destroy_inode_deferred);
+}
+
 static const struct super_operations bpf_super_ops = {
        .statfs         = simple_statfs,
        .drop_inode     = generic_delete_inode,
        .show_options   = bpf_show_options,
-       .evict_inode    = bpf_evict_inode,
+       .destroy_inode  = bpf_destroy_inode,
 };
 
 enum {
index 62f6bced3a3c486732dd871693d5d44cf19ab8c2..afca36f53c492718820ecacdb588af585dbb50e4 100644 (file)
@@ -136,21 +136,29 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 
 void *bpf_map_area_alloc(size_t size, int numa_node)
 {
-       /* We definitely need __GFP_NORETRY, so OOM killer doesn't
-        * trigger under memory pressure as we really just want to
-        * fail instead.
+       /* We really just want to fail instead of triggering OOM killer
+        * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
+        * which is used for lower order allocation requests.
+        *
+        * It has been observed that higher order allocation requests done by
+        * vmalloc with __GFP_NORETRY being set might fail due to not trying
+        * to reclaim memory from the page cache, thus we set
+        * __GFP_RETRY_MAYFAIL to avoid such situations.
         */
-       const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
+
+       const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
        void *area;
 
        if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
-               area = kmalloc_node(size, GFP_USER | flags, numa_node);
+               area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
+                                   numa_node);
                if (area != NULL)
                        return area;
        }
 
-       return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags,
-                                          __builtin_return_address(0));
+       return __vmalloc_node_flags_caller(size, numa_node,
+                                          GFP_KERNEL | __GFP_RETRY_MAYFAIL |
+                                          flags, __builtin_return_address(0));
 }
 
 void bpf_map_area_free(void *area)
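
Worked numbers for the threshold in the hunk above, assuming 4 KiB pages and the default PAGE_ALLOC_COSTLY_ORDER of 3:

/*
 *   PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER = 4096 << 3 = 32768 (32 KiB)
 *
 * Requests up to 32 KiB first try kmalloc_node() with __GFP_NORETRY
 * and fail fast under memory pressure, while larger requests (or a
 * failed kmalloc) fall through to vmalloc with __GFP_RETRY_MAYFAIL,
 * which may reclaim page cache but still avoids invoking the OOM
 * killer.
 */
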
index ce166a002d161a08eff6bdbb77158886dbba8012..09d5d972c9ff20c9fe69ca4ffbbbb185a998b56d 100644 (file)
@@ -212,7 +212,7 @@ struct bpf_call_arg_meta {
        int access_size;
        s64 msize_smax_value;
        u64 msize_umax_value;
-       int ptr_id;
+       int ref_obj_id;
        int func_id;
 };
 
@@ -346,35 +346,23 @@ static bool reg_type_may_be_null(enum bpf_reg_type type)
               type == PTR_TO_TCP_SOCK_OR_NULL;
 }
 
-static bool type_is_refcounted(enum bpf_reg_type type)
-{
-       return type == PTR_TO_SOCKET;
-}
-
-static bool type_is_refcounted_or_null(enum bpf_reg_type type)
-{
-       return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL;
-}
-
-static bool reg_is_refcounted(const struct bpf_reg_state *reg)
-{
-       return type_is_refcounted(reg->type);
-}
-
 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
 {
        return reg->type == PTR_TO_MAP_VALUE &&
                map_value_has_spin_lock(reg->map_ptr);
 }
 
-static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg)
+static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
 {
-       return type_is_refcounted_or_null(reg->type);
+       return type == PTR_TO_SOCKET ||
+               type == PTR_TO_SOCKET_OR_NULL ||
+               type == PTR_TO_TCP_SOCK ||
+               type == PTR_TO_TCP_SOCK_OR_NULL;
 }
 
-static bool arg_type_is_refcounted(enum bpf_arg_type type)
+static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
 {
-       return type == ARG_PTR_TO_SOCKET;
+       return type == ARG_PTR_TO_SOCK_COMMON;
 }
 
 /* Determine whether the function releases some resources allocated by another
@@ -392,6 +380,12 @@ static bool is_acquire_function(enum bpf_func_id func_id)
                func_id == BPF_FUNC_sk_lookup_udp;
 }
 
+static bool is_ptr_cast_function(enum bpf_func_id func_id)
+{
+       return func_id == BPF_FUNC_tcp_sock ||
+               func_id == BPF_FUNC_sk_fullsock;
+}
+
 /* string representation of 'enum bpf_reg_type' */
 static const char * const reg_type_str[] = {
        [NOT_INIT]              = "?",
@@ -466,6 +460,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
                                verbose(env, ",call_%d", func(env, reg)->callsite);
                } else {
                        verbose(env, "(id=%d", reg->id);
+                       if (reg_type_may_be_refcounted_or_null(t))
+                               verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
                        if (t != SCALAR_VALUE)
                                verbose(env, ",off=%d", reg->off);
                        if (type_is_pkt_pointer(t))
@@ -1901,8 +1897,9 @@ continue_func:
                }
                frame++;
                if (frame >= MAX_CALL_FRAMES) {
-                       WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
-                       return -EFAULT;
+                       verbose(env, "the call stack of %d frames is too deep !\n",
+                               frame);
+                       return -E2BIG;
                }
                goto process_func;
        }
@@ -2414,16 +2411,15 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
                /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
                if (!type_is_sk_pointer(type))
                        goto err_type;
-       } else if (arg_type == ARG_PTR_TO_SOCKET) {
-               expected_type = PTR_TO_SOCKET;
-               if (type != expected_type)
-                       goto err_type;
-               if (meta->ptr_id || !reg->id) {
-                       verbose(env, "verifier internal error: mismatched references meta=%d, reg=%d\n",
-                               meta->ptr_id, reg->id);
-                       return -EFAULT;
+               if (reg->ref_obj_id) {
+                       if (meta->ref_obj_id) {
+                               verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
+                                       regno, reg->ref_obj_id,
+                                       meta->ref_obj_id);
+                               return -EFAULT;
+                       }
+                       meta->ref_obj_id = reg->ref_obj_id;
                }
-               meta->ptr_id = reg->id;
        } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
                if (meta->func_id == BPF_FUNC_spin_lock) {
                        if (process_spin_lock(env, regno, true))
@@ -2740,32 +2736,38 @@ static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
        return true;
 }
 
-static bool check_refcount_ok(const struct bpf_func_proto *fn)
+static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
 {
        int count = 0;
 
-       if (arg_type_is_refcounted(fn->arg1_type))
+       if (arg_type_may_be_refcounted(fn->arg1_type))
                count++;
-       if (arg_type_is_refcounted(fn->arg2_type))
+       if (arg_type_may_be_refcounted(fn->arg2_type))
                count++;
-       if (arg_type_is_refcounted(fn->arg3_type))
+       if (arg_type_may_be_refcounted(fn->arg3_type))
                count++;
-       if (arg_type_is_refcounted(fn->arg4_type))
+       if (arg_type_may_be_refcounted(fn->arg4_type))
                count++;
-       if (arg_type_is_refcounted(fn->arg5_type))
+       if (arg_type_may_be_refcounted(fn->arg5_type))
                count++;
 
+       /* A reference acquiring function cannot acquire
+        * another refcounted ptr.
+        */
+       if (is_acquire_function(func_id) && count)
+               return false;
+
        /* We only support one arg being unreferenced at the moment,
         * which is sufficient for the helper functions we have right now.
         */
        return count <= 1;
 }
 
-static int check_func_proto(const struct bpf_func_proto *fn)
+static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
 {
        return check_raw_mode_ok(fn) &&
               check_arg_pair_ok(fn) &&
-              check_refcount_ok(fn) ? 0 : -EINVAL;
+              check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
 }
 
 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
@@ -2799,19 +2801,20 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
 }
 
 static void release_reg_references(struct bpf_verifier_env *env,
-                                  struct bpf_func_state *state, int id)
+                                  struct bpf_func_state *state,
+                                  int ref_obj_id)
 {
        struct bpf_reg_state *regs = state->regs, *reg;
        int i;
 
        for (i = 0; i < MAX_BPF_REG; i++)
-               if (regs[i].id == id)
+               if (regs[i].ref_obj_id == ref_obj_id)
                        mark_reg_unknown(env, regs, i);
 
        bpf_for_each_spilled_reg(i, state, reg) {
                if (!reg)
                        continue;
-               if (reg_is_refcounted(reg) && reg->id == id)
+               if (reg->ref_obj_id == ref_obj_id)
                        __mark_reg_unknown(reg);
        }
 }
@@ -2820,15 +2823,20 @@ static void release_reg_references(struct bpf_verifier_env *env,
  * resources. Identify all copies of the same pointer and clear the reference.
  */
 static int release_reference(struct bpf_verifier_env *env,
-                            struct bpf_call_arg_meta *meta)
+                            int ref_obj_id)
 {
        struct bpf_verifier_state *vstate = env->cur_state;
+       int err;
        int i;
 
+       err = release_reference_state(cur_func(env), ref_obj_id);
+       if (err)
+               return err;
+
        for (i = 0; i <= vstate->curframe; i++)
-               release_reg_references(env, vstate->frame[i], meta->ptr_id);
+               release_reg_references(env, vstate->frame[i], ref_obj_id);
 
-       return release_reference_state(cur_func(env), meta->ptr_id);
+       return 0;
 }
 
 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
@@ -3047,7 +3055,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
        memset(&meta, 0, sizeof(meta));
        meta.pkt_access = fn->pkt_access;
 
-       err = check_func_proto(fn);
+       err = check_func_proto(fn, func_id);
        if (err) {
                verbose(env, "kernel subsystem misconfigured func %s#%d\n",
                        func_id_name(func_id), func_id);
@@ -3093,7 +3101,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
                        return err;
                }
        } else if (is_release_function(func_id)) {
-               err = release_reference(env, &meta);
+               err = release_reference(env, meta.ref_obj_id);
                if (err) {
                        verbose(env, "func %s#%d reference has not been acquired before\n",
                                func_id_name(func_id), func_id);
@@ -3154,8 +3162,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
 
                        if (id < 0)
                                return id;
-                       /* For release_reference() */
+                       /* For mark_ptr_or_null_reg() */
                        regs[BPF_REG_0].id = id;
+                       /* For release_reference() */
+                       regs[BPF_REG_0].ref_obj_id = id;
                } else {
                        /* For mark_ptr_or_null_reg() */
                        regs[BPF_REG_0].id = ++env->id_gen;
@@ -3170,6 +3180,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
                return -EINVAL;
        }
 
+       if (is_ptr_cast_function(func_id))
+               /* For release_reference() */
+               regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
+
        do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
 
        err = check_map_func_compatibility(env, meta.map_ptr, func_id);
@@ -3368,7 +3382,7 @@ do_sim:
                *dst_reg = *ptr_reg;
        }
        ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
-       if (!ptr_is_dst_reg)
+       if (!ptr_is_dst_reg && ret)
                *dst_reg = tmp;
        return !ret ? -EFAULT : 0;
 }
@@ -4124,15 +4138,35 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
        return 0;
 }
 
+static void __find_good_pkt_pointers(struct bpf_func_state *state,
+                                    struct bpf_reg_state *dst_reg,
+                                    enum bpf_reg_type type, u16 new_range)
+{
+       struct bpf_reg_state *reg;
+       int i;
+
+       for (i = 0; i < MAX_BPF_REG; i++) {
+               reg = &state->regs[i];
+               if (reg->type == type && reg->id == dst_reg->id)
+                       /* keep the maximum range already checked */
+                       reg->range = max(reg->range, new_range);
+       }
+
+       bpf_for_each_spilled_reg(i, state, reg) {
+               if (!reg)
+                       continue;
+               if (reg->type == type && reg->id == dst_reg->id)
+                       reg->range = max(reg->range, new_range);
+       }
+}
+
 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
                                   struct bpf_reg_state *dst_reg,
                                   enum bpf_reg_type type,
                                   bool range_right_open)
 {
-       struct bpf_func_state *state = vstate->frame[vstate->curframe];
-       struct bpf_reg_state *regs = state->regs, *reg;
        u16 new_range;
-       int i, j;
+       int i;
 
        if (dst_reg->off < 0 ||
            (dst_reg->off == 0 && range_right_open))
@@ -4197,20 +4231,9 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
         * the range won't allow anything.
         * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
         */
-       for (i = 0; i < MAX_BPF_REG; i++)
-               if (regs[i].type == type && regs[i].id == dst_reg->id)
-                       /* keep the maximum range already checked */
-                       regs[i].range = max(regs[i].range, new_range);
-
-       for (j = 0; j <= vstate->curframe; j++) {
-               state = vstate->frame[j];
-               bpf_for_each_spilled_reg(i, state, reg) {
-                       if (!reg)
-                               continue;
-                       if (reg->type == type && reg->id == dst_reg->id)
-                               reg->range = max(reg->range, new_range);
-               }
-       }
+       for (i = 0; i <= vstate->curframe; i++)
+               __find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
+                                        new_range);
 }
 
 /* compute branch direction of the expression "if (reg opcode val) goto target;"
@@ -4665,17 +4688,41 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
                } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
                        reg->type = PTR_TO_TCP_SOCK;
                }
-               if (is_null || !(reg_is_refcounted(reg) ||
-                                reg_may_point_to_spin_lock(reg))) {
-                       /* We don't need id from this point onwards anymore,
-                        * thus we should better reset it, so that state
-                        * pruning has chances to take effect.
+               if (is_null) {
+                       /* We don't need id and ref_obj_id from this point
+                        * onwards anymore, thus we had better reset them,
+                        * so that state pruning has a chance to take effect.
+                        */
+                       reg->id = 0;
+                       reg->ref_obj_id = 0;
+               } else if (!reg_may_point_to_spin_lock(reg)) {
+                       /* For not-NULL ptr, reg->ref_obj_id will be reset
+                        * in release_reg_references().
+                        *
+                        * reg->id is still used by spin_lock ptr. Other
+                        * than spin_lock ptr type, reg->id can be reset.
                         */
                        reg->id = 0;
                }
        }
 }
 
+static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
+                                   bool is_null)
+{
+       struct bpf_reg_state *reg;
+       int i;
+
+       for (i = 0; i < MAX_BPF_REG; i++)
+               mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
+
+       bpf_for_each_spilled_reg(i, state, reg) {
+               if (!reg)
+                       continue;
+               mark_ptr_or_null_reg(state, reg, id, is_null);
+       }
+}
+
 /* The logic is similar to find_good_pkt_pointers(), both could eventually
  * be folded together at some point.
  */
@@ -4683,24 +4730,20 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
                                  bool is_null)
 {
        struct bpf_func_state *state = vstate->frame[vstate->curframe];
-       struct bpf_reg_state *reg, *regs = state->regs;
+       struct bpf_reg_state *regs = state->regs;
+       u32 ref_obj_id = regs[regno].ref_obj_id;
        u32 id = regs[regno].id;
-       int i, j;
-
-       if (reg_is_refcounted_or_null(&regs[regno]) && is_null)
-               release_reference_state(state, id);
+       int i;
 
-       for (i = 0; i < MAX_BPF_REG; i++)
-               mark_ptr_or_null_reg(state, &regs[i], id, is_null);
+       if (ref_obj_id && ref_obj_id == id && is_null)
+               /* regs[regno] is in the " == NULL" branch.
+                * No one could have freed the reference state before
+                * doing the NULL check.
+                */
+               WARN_ON_ONCE(release_reference_state(state, id));
 
-       for (j = 0; j <= vstate->curframe; j++) {
-               state = vstate->frame[j];
-               bpf_for_each_spilled_reg(i, state, reg) {
-                       if (!reg)
-                               continue;
-                       mark_ptr_or_null_reg(state, reg, id, is_null);
-               }
-       }
+       for (i = 0; i <= vstate->curframe; i++)
+               __mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
 }
 
 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
@@ -6052,15 +6095,17 @@ static int propagate_liveness(struct bpf_verifier_env *env,
        }
        /* Propagate read liveness of registers... */
        BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
-       /* We don't need to worry about FP liveness because it's read-only */
-       for (i = 0; i < BPF_REG_FP; i++) {
-               if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
-                       continue;
-               if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
-                       err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i],
-                                           &vparent->frame[vstate->curframe]->regs[i]);
-                       if (err)
-                               return err;
+       for (frame = 0; frame <= vstate->curframe; frame++) {
+               /* We don't need to worry about FP liveness, it's read-only */
+               for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
+                       if (vparent->frame[frame]->regs[i].live & REG_LIVE_READ)
+                               continue;
+                       if (vstate->frame[frame]->regs[i].live & REG_LIVE_READ) {
+                               err = mark_reg_read(env, &vstate->frame[frame]->regs[i],
+                                                   &vparent->frame[frame]->regs[i]);
+                               if (err)
+                                       return err;
+                       }
                }
        }
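
Seen from the BPF program side, the ref_obj_id plumbing in this file is what lets a reference-acquiring lookup be combined with the pointer-cast helpers while the verifier still enforces exactly one release. A hedged sketch of such a program (helper availability depends on the program type; this is illustrative, not code from the patch):

    /* sketch of a BPF-C program using the acquire/cast/release pattern */
    struct bpf_sock_tuple tuple = {};       /* fill in the 4-tuple */
    struct bpf_sock *sk;
    struct bpf_tcp_sock *tp;

    sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), -1, 0);
    if (!sk)
            return 0;

    /* bpf_tcp_sock() is a ptr-cast function: tp inherits sk's
     * ref_obj_id rather than acquiring a new reference, so releasing
     * sk below also invalidates tp.
     */
    tp = bpf_tcp_sock(sk);
    if (tp)
            /* e.g. read tp->snd_cwnd here */;

    bpf_sk_release(sk);
    return 0;
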
 
index 4834c4214e9cd15f2122b4747b31a66e5b632df9..6a1942ed781c5b1712021299fb8227d389126c00 100644 (file)
@@ -740,11 +740,10 @@ static inline int nr_cpusets(void)
  * Must be called with cpuset_mutex held.
  *
  * The three key local variables below are:
- *    q  - a linked-list queue of cpuset pointers, used to implement a
- *        top-down scan of all cpusets.  This scan loads a pointer
- *        to each cpuset marked is_sched_load_balance into the
- *        array 'csa'.  For our purposes, rebuilding the schedulers
- *        sched domains, we can ignore !is_sched_load_balance cpusets.
+ *    cp - cpuset pointer, used (together with pos_css) to perform a
+ *        top-down scan of all cpusets. For our purposes, rebuilding
+ *        the scheduler's sched domains, we can ignore !is_sched_load_
+ *        balance cpusets.
  *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
  *        that need to be load balanced, for convenient iterative
  *        access by the subsequent code that finds the best partition,
@@ -775,7 +774,7 @@ static inline int nr_cpusets(void)
 static int generate_sched_domains(cpumask_var_t **domains,
                        struct sched_domain_attr **attributes)
 {
-       struct cpuset *cp;      /* scans q */
+       struct cpuset *cp;      /* top-down scan of cpusets */
        struct cpuset **csa;    /* array of all cpuset ptrs */
        int csn;                /* how many cpuset ptrs in csa so far */
        int i, j, k;            /* indices for partition finding loops */
index f69ba38573c236435392ec7a76dc814cbb9ef3ea..f2ef10460698e9ec8dcb26dd7a0568c3a1448c54 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/notifier.h>
 #include <linux/sched/signal.h>
 #include <linux/sched/hotplug.h>
+#include <linux/sched/isolation.h>
 #include <linux/sched/task.h>
 #include <linux/sched/smt.h>
 #include <linux/unistd.h>
@@ -564,6 +565,20 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
                cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
 }
 
+static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
+{
+       if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
+               return true;
+       /*
+        * When CPU hotplug is disabled, taking the CPU down is not
+        * possible because takedown_cpu() and the architecture and
+        * subsystem specific mechanisms are not available. So the CPU
+        * which would be completely unplugged again needs to stay around
+        * in the current state.
+        */
+       return st->state <= CPUHP_BRINGUP_CPU;
+}
+
 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                              enum cpuhp_state target)
 {
@@ -574,8 +589,10 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                st->state++;
                ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
                if (ret) {
-                       st->target = prev_state;
-                       undo_cpu_up(cpu, st);
+                       if (can_rollback_cpu(st)) {
+                               st->target = prev_state;
+                               undo_cpu_up(cpu, st);
+                       }
                        break;
                }
        }
@@ -1185,8 +1202,15 @@ int freeze_secondary_cpus(int primary)
        int cpu, error = 0;
 
        cpu_maps_update_begin();
-       if (!cpu_online(primary))
+       if (primary == -1) {
                primary = cpumask_first(cpu_online_mask);
+               if (!housekeeping_cpu(primary, HK_FLAG_TIMER))
+                       primary = housekeeping_any_cpu(HK_FLAG_TIMER);
+       } else {
+               if (!cpu_online(primary))
+                       primary = cpumask_first(cpu_online_mask);
+       }
+
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time
@@ -2019,19 +2043,6 @@ static const struct attribute_group cpuhp_cpu_root_attr_group = {
 
 #ifdef CONFIG_HOTPLUG_SMT
 
-static const char *smt_states[] = {
-       [CPU_SMT_ENABLED]               = "on",
-       [CPU_SMT_DISABLED]              = "off",
-       [CPU_SMT_FORCE_DISABLED]        = "forceoff",
-       [CPU_SMT_NOT_SUPPORTED]         = "notsupported",
-};
-
-static ssize_t
-show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
-}
-
 static void cpuhp_offline_cpu_device(unsigned int cpu)
 {
        struct device *dev = get_cpu_device(cpu);
@@ -2102,9 +2113,10 @@ static int cpuhp_smt_enable(void)
        return ret;
 }
 
+
 static ssize_t
-store_smt_control(struct device *dev, struct device_attribute *attr,
-                 const char *buf, size_t count)
+__store_smt_control(struct device *dev, struct device_attribute *attr,
+                   const char *buf, size_t count)
 {
        int ctrlval, ret;
 
@@ -2142,14 +2154,44 @@ store_smt_control(struct device *dev, struct device_attribute *attr,
        unlock_device_hotplug();
        return ret ? ret : count;
 }
+
+#else /* !CONFIG_HOTPLUG_SMT */
+static ssize_t
+__store_smt_control(struct device *dev, struct device_attribute *attr,
+                   const char *buf, size_t count)
+{
+       return -ENODEV;
+}
+#endif /* CONFIG_HOTPLUG_SMT */
+
+static const char *smt_states[] = {
+       [CPU_SMT_ENABLED]               = "on",
+       [CPU_SMT_DISABLED]              = "off",
+       [CPU_SMT_FORCE_DISABLED]        = "forceoff",
+       [CPU_SMT_NOT_SUPPORTED]         = "notsupported",
+       [CPU_SMT_NOT_IMPLEMENTED]       = "notimplemented",
+};
+
+static ssize_t
+show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       const char *state = smt_states[cpu_smt_control];
+
+       return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
+}
+
+static ssize_t
+store_smt_control(struct device *dev, struct device_attribute *attr,
+                 const char *buf, size_t count)
+{
+       return __store_smt_control(dev, attr, buf, count);
+}
 static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
 
 static ssize_t
 show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       bool active = topology_max_smt_threads() > 1;
-
-       return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
+       return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
 }
 static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
 
@@ -2165,21 +2207,17 @@ static const struct attribute_group cpuhp_smt_attr_group = {
        NULL
 };
 
-static int __init cpu_smt_state_init(void)
+static int __init cpu_smt_sysfs_init(void)
 {
        return sysfs_create_group(&cpu_subsys.dev_root->kobj,
                                  &cpuhp_smt_attr_group);
 }
 
-#else
-static inline int cpu_smt_state_init(void) { return 0; }
-#endif
-
 static int __init cpuhp_sysfs_init(void)
 {
        int cpu, ret;
 
-       ret = cpu_smt_state_init();
+       ret = cpu_smt_sysfs_init();
        if (ret)
                return ret;
 
@@ -2200,7 +2238,7 @@ static int __init cpuhp_sysfs_init(void)
        return 0;
 }
 device_initcall(cpuhp_sysfs_init);
-#endif
+#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
 
 /*
  * cpu_bit_bitmap[] is a special, "compressed" data structure that
@@ -2290,3 +2328,18 @@ void __init boot_cpu_hotplug_init(void)
 #endif
        this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
 }
+
+enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
+
+static int __init mitigations_parse_cmdline(char *arg)
+{
+       if (!strcmp(arg, "off"))
+               cpu_mitigations = CPU_MITIGATIONS_OFF;
+       else if (!strcmp(arg, "auto"))
+               cpu_mitigations = CPU_MITIGATIONS_AUTO;
+       else if (!strcmp(arg, "auto,nosmt"))
+               cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
+
+       return 0;
+}
+early_param("mitigations", mitigations_parse_cmdline);
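
The parsed mode is meant to be consulted by the architecture mitigation selection code. A sketch of how that consumption can look, assuming the cpu_mitigations_off() and cpu_mitigations_auto_nosmt() accessors that accompany the enum in include/linux/cpu.h; the mitigation hook itself is hypothetical:

    static void __init example_select_mitigation(void)
    {
            /* mitigations=off: leave this CPU bug unmitigated */
            if (cpu_mitigations_off())
                    return;

            enable_default_mitigation();    /* hypothetical arch hook */

            /* mitigations=auto,nosmt: additionally disable SMT */
            if (cpu_mitigations_auto_nosmt())
                    cpu_smt_disable(false);
    }
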
index 45d51e8e26f62f27b8b91c3e53ddb2df20fd791b..badd77670d00510b94336c715c13f73cf5052e24 100644 (file)
@@ -89,8 +89,8 @@ struct dma_debug_entry {
        int              sg_mapped_ents;
        enum map_err_types  map_err_type;
 #ifdef CONFIG_STACKTRACE
-       struct           stack_trace stacktrace;
-       unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
+       unsigned int    stack_len;
+       unsigned long   stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
 #endif
 };
 
@@ -174,7 +174,7 @@ static inline void dump_entry_trace(struct dma_debug_entry *entry)
 #ifdef CONFIG_STACKTRACE
        if (entry) {
                pr_warning("Mapped at:\n");
-               print_stack_trace(&entry->stacktrace, 0);
+               stack_trace_print(entry->stack_entries, entry->stack_len, 0);
        }
 #endif
 }
@@ -704,12 +704,10 @@ static struct dma_debug_entry *dma_entry_alloc(void)
        spin_unlock_irqrestore(&free_entries_lock, flags);
 
 #ifdef CONFIG_STACKTRACE
-       entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
-       entry->stacktrace.entries = entry->st_entries;
-       entry->stacktrace.skip = 2;
-       save_stack_trace(&entry->stacktrace);
+       entry->stack_len = stack_trace_save(entry->stack_entries,
+                                           ARRAY_SIZE(entry->stack_entries),
+                                           1);
 #endif
-
        return entry;
 }
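
The conversion above is to the simplified stacktrace API, where the caller owns a plain array of entries instead of a struct stack_trace. A generic sketch of the save/print pair (array size and skip count are arbitrary here):

    #include <linux/stacktrace.h>

    static unsigned long entries[16];
    static unsigned int nr_entries;

    static void record_mapper(void)
    {
            /* skip 1 frame so record_mapper() itself is not stored */
            nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
    }

    static void dump_mapper(void)
    {
            stack_trace_print(entries, nr_entries, 0);
    }
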
 
index 1032a16bd1866d228623f4ef1f572fcd5cac64f8..abbd4b3b96c2a2a1a75dde8b1640b0b286e7c344 100644 (file)
@@ -2009,8 +2009,8 @@ event_sched_out(struct perf_event *event,
        event->pmu->del(event, 0);
        event->oncpu = -1;
 
-       if (event->pending_disable) {
-               event->pending_disable = 0;
+       if (READ_ONCE(event->pending_disable) >= 0) {
+               WRITE_ONCE(event->pending_disable, -1);
                state = PERF_EVENT_STATE_OFF;
        }
        perf_event_set_state(event, state);
@@ -2198,7 +2198,8 @@ EXPORT_SYMBOL_GPL(perf_event_disable);
 
 void perf_event_disable_inatomic(struct perf_event *event)
 {
-       event->pending_disable = 1;
+       WRITE_ONCE(event->pending_disable, smp_processor_id());
+       /* can fail, see perf_pending_event_disable() */
        irq_work_queue(&event->pending);
 }
 
@@ -2477,6 +2478,16 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
        perf_pmu_enable(cpuctx->ctx.pmu);
 }
 
+void perf_pmu_resched(struct pmu *pmu)
+{
+       struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+       struct perf_event_context *task_ctx = cpuctx->task_ctx;
+
+       perf_ctx_lock(cpuctx, task_ctx);
+       ctx_resched(cpuctx, task_ctx, EVENT_ALL|EVENT_CPU);
+       perf_ctx_unlock(cpuctx, task_ctx);
+}
+
 /*
  * Cross CPU call to install and enable a performance event
  *
@@ -5810,10 +5821,45 @@ void perf_event_wakeup(struct perf_event *event)
        }
 }
 
+static void perf_pending_event_disable(struct perf_event *event)
+{
+       int cpu = READ_ONCE(event->pending_disable);
+
+       if (cpu < 0)
+               return;
+
+       if (cpu == smp_processor_id()) {
+               WRITE_ONCE(event->pending_disable, -1);
+               perf_event_disable_local(event);
+               return;
+       }
+
+       /*
+        *  CPU-A                       CPU-B
+        *
+        *  perf_event_disable_inatomic()
+        *    @pending_disable = CPU-A;
+        *    irq_work_queue();
+        *
+        *  sched-out
+        *    @pending_disable = -1;
+        *
+        *                              sched-in
+        *                              perf_event_disable_inatomic()
+        *                                @pending_disable = CPU-B;
+        *                                irq_work_queue(); // FAILS
+        *
+        *  irq_work_run()
+        *    perf_pending_event()
+        *
+        * But the event runs on CPU-B and wants disabling there.
+        */
+       irq_work_queue_on(&event->pending, cpu);
+}
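
The race documented above is resolved by re-queueing the irq_work on the CPU that owns the event. The same pattern, reduced to a self-contained sketch (generic irq_work, not the perf code itself):

    #include <linux/irq_work.h>
    #include <linux/smp.h>

    struct remote_req {
            struct irq_work work;
            int cpu;                /* target CPU, -1 when idle */
    };

    static void remote_req_fn(struct irq_work *work)
    {
            struct remote_req *r = container_of(work, struct remote_req, work);
            int cpu = READ_ONCE(r->cpu);

            if (cpu < 0)
                    return;

            if (cpu == smp_processor_id()) {
                    WRITE_ONCE(r->cpu, -1);
                    /* perform the CPU-local operation here */
                    return;
            }

            /* Ran on the wrong CPU, e.g. after a migration: requeue
             * on the CPU recorded by the requester.
             */
            irq_work_queue_on(work, cpu);
    }
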
+
 static void perf_pending_event(struct irq_work *entry)
 {
-       struct perf_event *event = container_of(entry,
-                       struct perf_event, pending);
+       struct perf_event *event = container_of(entry, struct perf_event, pending);
        int rctx;
 
        rctx = perf_swevent_get_recursion_context();
@@ -5822,10 +5868,7 @@ static void perf_pending_event(struct irq_work *entry)
         * and we won't recurse 'further'.
         */
 
-       if (event->pending_disable) {
-               event->pending_disable = 0;
-               perf_event_disable_local(event);
-       }
+       perf_pending_event_disable(event);
 
        if (event->pending_wakeup) {
                event->pending_wakeup = 0;
@@ -7189,6 +7232,7 @@ static void perf_event_mmap_output(struct perf_event *event,
        struct perf_output_handle handle;
        struct perf_sample_data sample;
        int size = mmap_event->event_id.header.size;
+       u32 type = mmap_event->event_id.header.type;
        int ret;
 
        if (!perf_event_mmap_match(event, data))
@@ -7232,6 +7276,7 @@ static void perf_event_mmap_output(struct perf_event *event,
        perf_output_end(&handle);
 out:
        mmap_event->event_id.header.size = size;
+       mmap_event->event_id.header.type = type;
 }
 
 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
@@ -9042,26 +9087,29 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
        if (task == TASK_TOMBSTONE)
                return;
 
-       if (!ifh->nr_file_filters)
-               return;
-
-       mm = get_task_mm(event->ctx->task);
-       if (!mm)
-               goto restart;
+       if (ifh->nr_file_filters) {
+               mm = get_task_mm(event->ctx->task);
+               if (!mm)
+                       goto restart;
 
-       down_read(&mm->mmap_sem);
+               down_read(&mm->mmap_sem);
+       }
 
        raw_spin_lock_irqsave(&ifh->lock, flags);
        list_for_each_entry(filter, &ifh->list, entry) {
-               event->addr_filter_ranges[count].start = 0;
-               event->addr_filter_ranges[count].size = 0;
+               if (filter->path.dentry) {
+                       /*
+                        * Adjust base offset if the filter is associated with a
+                        * binary that needs to be mapped:
+                        */
+                       event->addr_filter_ranges[count].start = 0;
+                       event->addr_filter_ranges[count].size = 0;
 
-               /*
-                * Adjust base offset if the filter is associated to a binary
-                * that needs to be mapped:
-                */
-               if (filter->path.dentry)
                        perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]);
+               } else {
+                       event->addr_filter_ranges[count].start = filter->offset;
+                       event->addr_filter_ranges[count].size  = filter->size;
+               }
 
                count++;
        }
@@ -9069,9 +9117,11 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
        event->addr_filters_gen++;
        raw_spin_unlock_irqrestore(&ifh->lock, flags);
 
-       up_read(&mm->mmap_sem);
+       if (ifh->nr_file_filters) {
+               up_read(&mm->mmap_sem);
 
-       mmput(mm);
+               mmput(mm);
+       }
 
 restart:
        perf_event_stop(event, 1);
@@ -10234,6 +10284,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 
        init_waitqueue_head(&event->waitq);
+       event->pending_disable = -1;
        init_irq_work(&event->pending, perf_pending_event);
 
        mutex_init(&event->mmap_mutex);
@@ -11876,7 +11927,7 @@ static void __init perf_event_init_all_cpus(void)
        }
 }
 
-void perf_swevent_init_cpu(unsigned int cpu)
+static void perf_swevent_init_cpu(unsigned int cpu)
 {
        struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
index a4047321d7d8052b40302d4ed8c0aa8e649ba759..674b353834914c437002660f32ddc16489fe7be1 100644 (file)
@@ -392,7 +392,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
                 * store that will be enabled on successful return
                 */
                if (!handle->size) { /* A, matches D */
-                       event->pending_disable = 1;
+                       event->pending_disable = smp_processor_id();
                        perf_output_wakeup(handle);
                        local_set(&rb->aux_nest, 0);
                        goto err_put;
@@ -455,24 +455,21 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
                rb->aux_head += size;
        }
 
-       if (size || handle->aux_flags) {
-               /*
-                * Only send RECORD_AUX if we have something useful to communicate
-                *
-                * Note: the OVERWRITE records by themselves are not considered
-                * useful, as they don't communicate any *new* information,
-                * aside from the short-lived offset, that becomes history at
-                * the next event sched-in and therefore isn't useful.
-                * The userspace that needs to copy out AUX data in overwrite
-                * mode should know to use user_page::aux_head for the actual
-                * offset. So, from now on we don't output AUX records that
-                * have *only* OVERWRITE flag set.
-                */
-
-               if (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE)
-                       perf_event_aux_event(handle->event, aux_head, size,
-                                            handle->aux_flags);
-       }
+       /*
+        * Only send RECORD_AUX if we have something useful to communicate
+        *
+        * Note: the OVERWRITE records by themselves are not considered
+        * useful, as they don't communicate any *new* information,
+        * aside from the short-lived offset, that becomes history at
+        * the next event sched-in and therefore isn't useful.
+        * The userspace that needs to copy out AUX data in overwrite
+        * mode should know to use user_page::aux_head for the actual
+        * offset. So, from now on we don't output AUX records that
+        * have *only* OVERWRITE flag set.
+        */
+       if (size || (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE))
+               perf_event_aux_event(handle->event, aux_head, size,
+                                    handle->aux_flags);
 
        rb->user_page->aux_head = rb->aux_head;
        if (rb_need_aux_wakeup(rb))
@@ -480,7 +477,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
 
        if (wakeup) {
                if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
-                       handle->event->pending_disable = 1;
+                       handle->event->pending_disable = smp_processor_id();
                perf_output_wakeup(handle);
        }
 
@@ -613,8 +610,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
         * PMU requests more than one contiguous chunks of memory
         * for SW double buffering
         */
-       if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
-           !overwrite) {
+       if (!overwrite) {
                if (!max_order)
                        return -EINVAL;
 
index c3b73b0311bc7c79d4b8a34d344cc943d6511b32..9e40cf7be60662f0fb8f6c61a6066532954b3fb1 100644 (file)
@@ -3436,6 +3436,10 @@ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int p
 {
        u32 uval, uninitialized_var(nval), mval;
 
+       /* Futex address must be 32bit aligned */
+       if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
+               return -1;
+
 retry:
        if (get_user(uval, uaddr))
                return -1;
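
The new check rejects a robust list entry whose futex word is not naturally aligned, before any user access is attempted. A trivial userspace sketch of the arithmetic involved (addresses are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uintptr_t aligned   = 0x7ffd2000;  /* % 4 == 0, accepted */
            uintptr_t unaligned = 0x7ffd2002;  /* % 4 == 2, rejected with -1 */

            printf("%u %u\n",
                   (unsigned int)(aligned % sizeof(uint32_t)),
                   (unsigned int)(unaligned % sizeof(uint32_t)));
            return 0;
    }
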
index f7525e14ebc6f15ec1718d0e610586a88798738d..93c26444451011f50a98a9b918e33069e95b0905 100644 (file)
@@ -55,7 +55,7 @@ static void *try_ram_remap(resource_size_t offset, size_t size,
  *
  * MEMREMAP_WB - matches the default mapping for System RAM on
  * the architecture.  This is usually a read-allocate write-back cache.
- * Morever, if MEMREMAP_WB is specified and the requested remap region is RAM
+ * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM
  * memremap() will bypass establishing a new mapping and instead return
  * a pointer into the direct map.
  *
@@ -86,7 +86,7 @@ void *memremap(resource_size_t offset, size_t size, unsigned long flags)
        /* Try all mapping types requested until one returns non-NULL */
        if (flags & MEMREMAP_WB) {
                /*
-                * MEMREMAP_WB is special in that it can be satisifed
+                * MEMREMAP_WB is special in that it can be satisfied
                 * from the direct map.  Some archs depend on the
                 * capability of memremap() to autodetect cases where
                 * the requested range is potentially in System RAM.
index 3faef4a77f7103e004c6a26f9c4074750f23fc73..51128bea3846ca1c15cd622f0889602cd1688b78 100644 (file)
@@ -1449,6 +1449,10 @@ int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
 {
        data = data->parent_data;
+
+       if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
+               return 0;
+
        if (data->chip->irq_set_wake)
                return data->chip->irq_set_wake(data, on);
 
index 5d5378ea0afe316e3d5d569cc541f1ef23b0600a..f6e5515ee0774346c8bd8bcebc93f5b0e38f7230 100644 (file)
@@ -84,8 +84,6 @@ EXPORT_SYMBOL(devm_request_threaded_irq);
  *     @dev: device to request interrupt for
  *     @irq: Interrupt line to allocate
  *     @handler: Function to be called when the IRQ occurs
- *     @thread_fn: function to be called in a threaded interrupt context. NULL
- *                 for devices which handle everything in @handler
  *     @irqflags: Interrupt type flags
  *     @devname: An ascii name for the claiming device, dev_name(dev) if NULL
  *     @dev_id: A cookie passed back to the handler function
@@ -222,9 +220,8 @@ devm_irq_alloc_generic_chip(struct device *dev, const char *name, int num_ct,
                            irq_flow_handler_t handler)
 {
        struct irq_chip_generic *gc;
-       unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
 
-       gc = devm_kzalloc(dev, sz, GFP_KERNEL);
+       gc = devm_kzalloc(dev, struct_size(gc, chip_types, num_ct), GFP_KERNEL);
        if (gc)
                irq_init_generic_chip(gc, name, num_ct,
                                      irq_base, reg_base, handler);
index 13539e12cd8034279c8f324242e4996301203d1a..9f8a709337cf802f2ddbf33f90a956d851f0c5b3 100644 (file)
@@ -558,6 +558,7 @@ int __init early_irq_init(void)
                alloc_masks(&desc[i], node);
                raw_spin_lock_init(&desc[i].lock);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
+               mutex_init(&desc[i].request_mutex);
                desc_set_defaults(i, &desc[i], node, NULL, NULL);
        }
        return arch_early_irq_init();
index 9ec34a2a6638d4a242be9002e85e3ec87fc878a9..53a081392115816651fbd0c050013f96475e14d5 100644 (file)
@@ -196,6 +196,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
        case IRQ_SET_MASK_OK:
        case IRQ_SET_MASK_OK_DONE:
                cpumask_copy(desc->irq_common_data.affinity, mask);
+               /* fall through */
        case IRQ_SET_MASK_OK_NOCOPY:
                irq_validate_effective_affinity(data);
                irq_set_thread_affinity(desc);
@@ -356,8 +357,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
        desc->affinity_notify = notify;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
 
-       if (old_notify)
+       if (old_notify) {
+               cancel_work_sync(&old_notify->work);
                kref_put(&old_notify->kref, old_notify->release);
+       }
 
        return 0;
 }
index 1e4cb63a5c822998ffa89644cdc57ca6291252f5..90c735da15d0071d27c2e2542e0d14ff2a123241 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/idr.h>
 #include <linux/irq.h>
 #include <linux/math64.h>
+#include <linux/log2.h>
 
 #include <trace/events/irq.h>
 
@@ -18,16 +19,6 @@ DEFINE_STATIC_KEY_FALSE(irq_timing_enabled);
 
 DEFINE_PER_CPU(struct irq_timings, irq_timings);
 
-struct irqt_stat {
-       u64     next_evt;
-       u64     last_ts;
-       u64     variance;
-       u32     avg;
-       u32     nr_samples;
-       int     anomalies;
-       int     valid;
-};
-
 static DEFINE_IDR(irqt_stats);
 
 void irq_timings_enable(void)
@@ -40,75 +31,360 @@ void irq_timings_disable(void)
        static_branch_disable(&irq_timing_enabled);
 }
 
-/**
- * irqs_update - update the irq timing statistics with a new timestamp
+/*
+ * The main goal of this algorithm is to predict the next interrupt
+ * occurrence on the current CPU.
+ *
+ * Currently, the interrupt timings are stored in a circular array
+ * buffer every time there is an interrupt, as a tuple <irq,
+ * timestamp>: the interrupt number and the timestamp of when the
+ * event occurred.
+ *
+ * For every interrupt occurring in a short period of time, we can
+ * measure the elapsed time between the occurrences of the same
+ * interrupt and we end up with a suite of intervals. Experience
+ * shows that interrupts often follow a periodic pattern.
+ *
+ * The objective of the algorithm is to find this periodic pattern as
+ * fast as possible and use its period to predict the next irq event.
+ *
+ * When the next interrupt event is requested, we are in the situation
+ * where the interrupts are disabled and the circular buffer
+ * containing the timings is filled with the events which happened
+ * after the previous next-interrupt-event request.
+ *
+ * At this point, we read the circular buffer and we fill the irq
+ * related statistics structure. After this step, the circular array
+ * containing the timings is empty because all the values are
+ * dispatched in their corresponding buffers.
+ *
+ * Now, for each interrupt, we can predict the next event by using the
+ * suffix array, the log interval and the exponential moving average.
+ *
+ * 1. Suffix array
+ *
+ * Suffix array is an array of all the suffixes of a string. It is
+ * widely used as a data structure for compression, text search, ...
+ * For instance for the word 'banana', the suffixes will be: 'banana'
+ * 'anana' 'nana' 'ana' 'na' 'a'
+ *
+ * Usually the suffix array is sorted, but for our purpose sorting is
+ * not necessary and won't provide any improvement, because the
+ * problem at hand clearly defines the boundaries of the search by a
+ * max period and a min period.
+ *
+ * The suffix array will build a suite of intervals of different
+ * lengths and will look for the repetition of each suite. If a suite
+ * repeats, then we have the period, because the period is the length
+ * of the suite whatever its position in the buffer.
+ *
+ * 2. Log interval
+ *
+ * We saw that the irq timings allow us to compute the interval of the
+ * occurrences for a specific interrupt. We can reasonably assume that
+ * the longer the interval is, the higher the error for the next event
+ * will be, and we can consider storing those interval values in an
+ * array where each slot corresponds to an interval at the power of 2
+ * of the index. For example, index 12 will contain values between
+ * 2^11 and 2^12.
+ *
+ * At the end we have an array of values where each index defines a
+ * [2^(index - 1), 2^index] interval of values, allowing us to store a
+ * large number of values inside a small array.
+ *
+ * For example, if we have the value 1123, then we store it at
+ * index ilog2(1123) = 10.
+ *
+ * Storing those values at the specific index is done by computing an
+ * exponential moving average for this specific slot. For instance,
+ * the values 1800, 1123, 1453, ... all fall into the same slot (10)
+ * and the exponential moving average is recomputed every time a new
+ * value is stored at this slot.
+ *
+ * 3. Exponential Moving Average
+ *
+ * The EMA is widely used to track a signal, for stocks or as a low
+ * pass filter. The magic of the formula is that it is very simple,
+ * and the reactivity of the average can be tuned with a factor called
+ * alpha.
+ *
+ * The higher alpha is, the faster the average responds to signal
+ * changes. In our case, if a slot in the array covers a big
+ * interval, we can have numbers with a big difference between
+ * them. The impact of those differences on the average computation
+ * can be tuned by changing the alpha value.
+ *
+ *
+ *  -- The algorithm --
+ *
+ * We saw the different processing steps above; now let's see how they
+ * are used together.
+ *
+ * For each interrupt:
+ *     For each interval:
+ *             Compute the index = ilog2(interval)
+ *             Compute a new_ema(buffer[index], interval)
+ *             Store the index in a circular buffer
+ *
+ *     Compute the suffix array of the indexes
+ *
+ *     For each suffix:
+ *             If the suffix is reverse-found 3 times
+ *                     Return suffix
+ *
+ *     Return Not found
+ *
+ * However we cannot build an endless suffix array; it would make no
+ * sense and would add extra overhead, so we restrict the search to a
+ * maximum suffix length of 5 and a minimum suffix length of 2.
+ * Experience showed that 5 covers the majority of the maximum pattern
+ * periods found for different devices.
+ *
+ * The result is that a pattern is found in less than 1us for an
+ * interrupt.
  *
- * @irqs: an irqt_stat struct pointer
- * @ts: the new timestamp
+ * Example based on real values:
  *
- * The statistics are computed online, in other words, the code is
- * designed to compute the statistics on a stream of values rather
- * than doing multiple passes on the values to compute the average,
- * then the variance. The integer division introduces a loss of
- * precision but with an acceptable error margin regarding the results
- * we would have with the double floating precision: we are dealing
- * with nanosec, so big numbers, consequently the mantisse is
- * negligeable, especially when converting the time in usec
- * afterwards.
+ * Example 1 : MMC write/read interrupt interval:
  *
- * The computation happens at idle time. When the CPU is not idle, the
- * interrupts' timestamps are stored in the circular buffer, when the
- * CPU goes idle and this routine is called, all the buffer's values
- * are injected in the statistical model continuying to extend the
- * statistics from the previous busy-idle cycle.
+ *     223947, 1240, 1384, 1386, 1386,
+ *     217416, 1236, 1384, 1386, 1387,
+ *     214719, 1241, 1386, 1387, 1384,
+ *     213696, 1234, 1384, 1386, 1388,
+ *     219904, 1240, 1385, 1389, 1385,
+ *     212240, 1240, 1386, 1386, 1386,
+ *     214415, 1236, 1384, 1386, 1387,
+ *     214276, 1234, 1384, 1388, ?
  *
- * The observations showed a device will trigger a burst of periodic
- * interrupts followed by one or two peaks of longer time, for
- * instance when a SD card device flushes its cache, then the periodic
- * intervals occur again. A one second inactivity period resets the
- * stats, that gives us the certitude the statistical values won't
- * exceed 1x10^9, thus the computation won't overflow.
+ * For each element, apply ilog2(value)
  *
- * Basically, the purpose of the algorithm is to watch the periodic
- * interrupts and eliminate the peaks.
+ *     15, 8, 8, 8, 8,
+ *     15, 8, 8, 8, 8,
+ *     15, 8, 8, 8, 8,
+ *     15, 8, 8, 8, 8,
+ *     15, 8, 8, 8, 8,
+ *     15, 8, 8, 8, 8,
+ *     15, 8, 8, 8, 8,
+ *     15, 8, 8, 8, ?
  *
- * An interrupt is considered periodically stable if the interval of
- * its occurences follow the normal distribution, thus the values
- * comply with:
+ * With a max period of 5, we take the last (max_period * 3) = 15
+ * elements, as we can be confident that if the pattern repeats itself
+ * three times it is a repeating pattern.
  *
- *      avg - 3 x stddev < value < avg + 3 x stddev
+ *                  8,
+ *     15, 8, 8, 8, 8,
+ *     15, 8, 8, 8, 8,
+ *     15, 8, 8, 8, ?
  *
- * Which can be simplified to:
+ * Suffixes are:
  *
- *      -3 x stddev < value - avg < 3 x stddev
+ *  1) 8, 15, 8, 8, 8  <- max period
+ *  2) 8, 15, 8, 8
+ *  3) 8, 15, 8
+ *  4) 8, 15           <- min period
  *
- *      abs(value - avg) < 3 x stddev
+ * From there we search the repeating pattern for each suffix.
  *
- * In order to save a costly square root computation, we use the
- * variance. For the record, stddev = sqrt(variance). The equation
- * above becomes:
+ * buffer: 8, 15, 8, 8, 8, 8, 15, 8, 8, 8, 8, 15, 8, 8, 8
+ *         |   |  |  |  |  |   |  |  |  |  |   |  |  |  |
+ *         8, 15, 8, 8, 8  |   |  |  |  |  |   |  |  |  |
+ *                         8, 15, 8, 8, 8  |   |  |  |  |
+ *                                         8, 15, 8, 8, 8
  *
- *      abs(value - avg) < 3 x sqrt(variance)
+ * When moving the suffix, we found exactly 3 matches.
  *
- * And finally we square it:
+ * The first suffix with period 5 is repeating.
  *
- *      (value - avg) ^ 2 < (3 x sqrt(variance)) ^ 2
+ * The next event is (3 * max_period) % suffix_period
  *
- *      (value - avg) x (value - avg) < 9 x variance
+ * In this example the result is 0, so the next event is suffix[0] => 8
  *
- * Statistically speaking, any values out of this interval is
- * considered as an anomaly and is discarded. However, a normal
- * distribution appears when the number of samples is 30 (it is the
- * rule of thumb in statistics, cf. "30 samples" on Internet). When
- * there are three consecutive anomalies, the statistics are resetted.
+ * However, 8 is an index into the array of exponential moving
+ * averages, computed on the fly when the values were stored, so the
+ * interval is ema[8] = 1366
  *
+ *
+ * Example 2:
+ *
+ *     4, 3, 5, 100,
+ *     3, 3, 5, 117,
+ *     4, 4, 5, 112,
+ *     4, 3, 4, 110,
+ *     3, 5, 3, 117,
+ *     4, 4, 5, 112,
+ *     4, 3, 4, 110,
+ *     3, 4, 5, 112,
+ *     4, 3, 4, 110
+ *
+ * ilog2
+ *
+ *     0, 0, 0, 4,
+ *     0, 0, 0, 4,
+ *     0, 0, 0, 4,
+ *     0, 0, 0, 4,
+ *     0, 0, 0, 4,
+ *     0, 0, 0, 4,
+ *     0, 0, 0, 4,
+ *     0, 0, 0, 4,
+ *     0, 0, 0, 4
+ *
+ * Max period 5:
+ *        0, 0, 4,
+ *     0, 0, 0, 4,
+ *     0, 0, 0, 4,
+ *     0, 0, 0, 4
+ *
+ * Suffixes:
+ *
+ *  1) 0, 0, 4, 0, 0
+ *  2) 0, 0, 4, 0
+ *  3) 0, 0, 4
+ *  4) 0, 0
+ *
+ * buffer: 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4
+ *         |  |  |  |  |  |  X
+ *         0, 0, 4, 0, 0, |  X
+ *                        0, 0
+ *
+ * buffer: 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4
+ *         |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
+ *         0, 0, 4, 0, |  |  |  |  |  |  |  |  |  |  |
+ *                     0, 0, 4, 0, |  |  |  |  |  |  |
+ *                                 0, 0, 4, 0, |  |  |
+ *                                             0  0  4
+ *
+ * The pattern is found 3 times; the remainder is 1, which results
+ * from (max_period * 3) % suffix_period. This value is the index into
+ * the suffix array. The suffix array for a period of 4 has the
+ * value 4 at index 1.
+ */
+#define EMA_ALPHA_VAL          64
+#define EMA_ALPHA_SHIFT                7
+
+#define PREDICTION_PERIOD_MIN  2
+#define PREDICTION_PERIOD_MAX  5
+#define PREDICTION_FACTOR      4
+#define PREDICTION_MAX         10 /* 2 ^ PREDICTION_MAX useconds */
+#define PREDICTION_BUFFER_SIZE 16 /* slots for EMAs, hardly more than 16 */
+
+struct irqt_stat {
+       u64     last_ts;
+       u64     ema_time[PREDICTION_BUFFER_SIZE];
+       int     timings[IRQ_TIMINGS_SIZE];
+       int     circ_timings[IRQ_TIMINGS_SIZE];
+       int     count;
+};
+
+/*
+ * Exponential moving average computation
  */
-static void irqs_update(struct irqt_stat *irqs, u64 ts)
+static u64 irq_timings_ema_new(u64 value, u64 ema_old)
+{
+       s64 diff;
+
+       if (unlikely(!ema_old))
+               return value;
+
+       diff = (value - ema_old) * EMA_ALPHA_VAL;
+       /*
+        * We can add an s64 type variable to the u64 ema_old variable
+        * because the latter will never have its topmost bit set: it
+        * will always be smaller than a 2^63 nanosec interrupt
+        * interval (292 years).
+        */
+       return ema_old + (diff >> EMA_ALPHA_SHIFT);
+}
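
With EMA_ALPHA_VAL = 64 and EMA_ALPHA_SHIFT = 7, the smoothing factor is 64/128 = 0.5. A userspace re-implementation, for illustration only, to watch sample values like those of Example 1 converge toward the ~1366 mentioned above:

    #include <stdint.h>
    #include <stdio.h>

    #define EMA_ALPHA_VAL   64
    #define EMA_ALPHA_SHIFT 7

    static uint64_t ema_new(uint64_t value, uint64_t ema_old)
    {
            if (!ema_old)
                    return value;
            /* same fixed-point form as irq_timings_ema_new() above */
            return ema_old + (((int64_t)(value - ema_old) * EMA_ALPHA_VAL)
                              >> EMA_ALPHA_SHIFT);
    }

    int main(void)
    {
            const uint64_t samples[] = { 1240, 1384, 1386, 1386, 1236, 1384 };
            uint64_t ema = 0;

            for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                    ema = ema_new(samples[i], ema);
                    printf("sample=%4llu ema=%4llu\n",
                           (unsigned long long)samples[i],
                           (unsigned long long)ema);
            }
            return 0;
    }
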
+
+static int irq_timings_next_event_index(int *buffer, size_t len, int period_max)
+{
+       int i;
+
+       /*
+        * The buffer contains the suite of intervals on an ilog2
+        * basis; we are looking for a repetition. We start the
+        * search three times the length of the period back from
+        * the end of the buffer. We do that for each suffix.
+        */
+       for (i = period_max; i >= PREDICTION_PERIOD_MIN ; i--) {
+
+               int *begin = &buffer[len - (i * 3)];
+               int *ptr = begin;
+
+               /*
+                * We check whether the suite with period 'i' repeats
+                * itself. If it is truncated at the end, since it
+                * repeats we can use the period to find out the next
+                * element.
+                */
+               while (!memcmp(ptr, begin, i * sizeof(*ptr))) {
+                       ptr += i;
+                       if (ptr >= &buffer[len])
+                               return begin[((i * 3) % i)];
+               }
+       }
+
+       return -1;
+}
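
The search above can be exercised stand-alone by feeding it the linearized buffer from Example 1. A userspace harness that duplicates the loop, with the constants defined earlier:

    #include <stdio.h>
    #include <string.h>

    #define PREDICTION_PERIOD_MIN 2

    static int next_event_index(const int *buffer, size_t len, int period_max)
    {
            /* same walk as irq_timings_next_event_index() above */
            for (int i = period_max; i >= PREDICTION_PERIOD_MIN; i--) {
                    const int *begin = &buffer[len - (i * 3)];
                    const int *ptr = begin;

                    while (!memcmp(ptr, begin, i * sizeof(*ptr))) {
                            ptr += i;
                            if (ptr >= &buffer[len])
                                    return begin[(i * 3) % i];
                    }
            }
            return -1;
    }

    int main(void)
    {
            /* ilog2 of the MMC intervals from Example 1 */
            const int buf[] = { 8, 15, 8, 8, 8, 8, 15, 8,
                                8, 8, 8, 15, 8, 8, 8 };

            /* prints 8: the predicted next interval lives in ema[8] */
            printf("%d\n", next_event_index(buf, 15, 5));
            return 0;
    }
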
+
+static u64 __irq_timings_next_event(struct irqt_stat *irqs, int irq, u64 now)
+{
+       int index, i, period_max, count, start, min = INT_MAX;
+
+       if ((now - irqs->last_ts) >= NSEC_PER_SEC) {
+               irqs->count = irqs->last_ts = 0;
+               return U64_MAX;
+       }
+
+       /*
+        * As we want to find three times the repetition, we need a
+        * number of intervals greater or equal to three times the
+        * maximum period, otherwise we truncate the max period.
+        */
+       period_max = irqs->count > (3 * PREDICTION_PERIOD_MAX) ?
+               PREDICTION_PERIOD_MAX : irqs->count / 3;
+
+       /*
+        * If we don't have enough irq timings for this prediction,
+        * just bail out.
+        */
+       if (period_max <= PREDICTION_PERIOD_MIN)
+               return U64_MAX;
+
+       /*
+        * 'count' depends on whether the circular buffer wrapped or not
+        */
+       count = irqs->count < IRQ_TIMINGS_SIZE ?
+               irqs->count : IRQ_TIMINGS_SIZE;
+
+       start = irqs->count < IRQ_TIMINGS_SIZE ?
+               0 : (irqs->count & IRQ_TIMINGS_MASK);
+
+       /*
+        * Copy the content of the circular buffer into another buffer
+        * in order to linearize the buffer instead of dealing with
+        * wrapping indexes and a shifted array, which would be error
+        * prone and extremely difficult to debug.
+        */
+       for (i = 0; i < count; i++) {
+               int index = (start + i) & IRQ_TIMINGS_MASK;
+
+               irqs->timings[i] = irqs->circ_timings[index];
+               min = min_t(int, irqs->timings[i], min);
+       }
+
+       index = irq_timings_next_event_index(irqs->timings, count, period_max);
+       if (index < 0)
+               return irqs->last_ts + irqs->ema_time[min];
+
+       return irqs->last_ts + irqs->ema_time[index];
+}
+
+static inline void irq_timings_store(int irq, struct irqt_stat *irqs, u64 ts)
 {
        u64 old_ts = irqs->last_ts;
-       u64 variance = 0;
        u64 interval;
-       s64 diff;
+       int index;
 
        /*
         * The timestamps are absolute time values, we need to compute
@@ -135,87 +411,28 @@ static void irqs_update(struct irqt_stat *irqs, u64 ts)
         * want as we need another timestamp to compute an interval.
         */
        if (interval >= NSEC_PER_SEC) {
-               memset(irqs, 0, sizeof(*irqs));
-               irqs->last_ts = ts;
+               irqs->count = 0;
                return;
        }
 
        /*
-        * Pre-compute the delta with the average as the result is
-        * used several times in this function.
-        */
-       diff = interval - irqs->avg;
-
-       /*
-        * Increment the number of samples.
-        */
-       irqs->nr_samples++;
-
-       /*
-        * Online variance divided by the number of elements if there
-        * is more than one sample.  Normally the formula is division
-        * by nr_samples - 1 but we assume the number of element will be
-        * more than 32 and dividing by 32 instead of 31 is enough
-        * precise.
-        */
-       if (likely(irqs->nr_samples > 1))
-               variance = irqs->variance >> IRQ_TIMINGS_SHIFT;
-
-       /*
-        * The rule of thumb in statistics for the normal distribution
-        * is having at least 30 samples in order to have the model to
-        * apply. Values outside the interval are considered as an
-        * anomaly.
-        */
-       if ((irqs->nr_samples >= 30) && ((diff * diff) > (9 * variance))) {
-               /*
-                * After three consecutive anomalies, we reset the
-                * stats as it is no longer stable enough.
-                */
-               if (irqs->anomalies++ >= 3) {
-                       memset(irqs, 0, sizeof(*irqs));
-                       irqs->last_ts = ts;
-                       return;
-               }
-       } else {
-               /*
-                * The anomalies must be consecutives, so at this
-                * point, we reset the anomalies counter.
-                */
-               irqs->anomalies = 0;
-       }
-
-       /*
-        * The interrupt is considered stable enough to try to predict
-        * the next event on it.
+        * Get the index in the ema table for this interrupt. The
+        * PREDICTION_FACTOR increases the interval size for the array
+        * of exponential averages.
         */
-       irqs->valid = 1;
+       index = likely(interval) ?
+               ilog2((interval >> 10) / PREDICTION_FACTOR) : 0;
 
        /*
-        * Online average algorithm:
-        *
-        *  new_average = average + ((value - average) / count)
-        *
-        * The variance computation depends on the new average
-        * to be computed here first.
-        *
+        * Store the index as an element of the pattern in another
+        * circular array.
         */
-       irqs->avg = irqs->avg + (diff >> IRQ_TIMINGS_SHIFT);
+       irqs->circ_timings[irqs->count & IRQ_TIMINGS_MASK] = index;
 
-       /*
-        * Online variance algorithm:
-        *
-        *  new_variance = variance + (value - average) x (value - new_average)
-        *
-        * Warning: irqs->avg is updated with the line above, hence
-        * 'interval - irqs->avg' is no longer equal to 'diff'
-        */
-       irqs->variance = irqs->variance + (diff * (interval - irqs->avg));
+       irqs->ema_time[index] = irq_timings_ema_new(interval,
+                                                   irqs->ema_time[index]);
 
-       /*
-        * Update the next event
-        */
-       irqs->next_evt = ts + irqs->avg;
+       irqs->count++;
 }
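
irq_timings_ema_new() itself is outside this hunk. As a rough sketch, a fixed-point exponential moving average looks like the code below; the weight (EMA_SHIFT) and the seed-on-first-sample behaviour are assumptions, not the kernel's exact parameters. Note also that the index computed above first scales the interval down from nanoseconds (>> 10, roughly microseconds) before log2-bucketing, so each ema_time[] slot averages intervals of a similar order of magnitude.

    /* Minimal fixed-point EMA sketch; weight and seeding are assumptions. */
    #define EMA_SHIFT 2     /* hypothetical weight: alpha = 1/4 */

    static unsigned long long ema_update(unsigned long long value,
                                         unsigned long long ema_old)
    {
            /* arithmetic right shift assumed for a negative diff */
            long long diff = (long long)(value - ema_old);

            if (!ema_old)   /* first sample seeds the average */
                    return value;

            return ema_old + (diff >> EMA_SHIFT);
    }
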
 
 /**
@@ -259,6 +476,9 @@ u64 irq_timings_next_event(u64 now)
         */
        lockdep_assert_irqs_disabled();
 
+       if (!irqts->count)
+               return next_evt;
+
        /*
         * Number of elements in the circular buffer: If it happens it
         * was flushed before, then the number of elements could be
@@ -269,21 +489,19 @@ u64 irq_timings_next_event(u64 now)
         * type but with the cost of extra computation in the
         * interrupt handler hot path. We choose efficiency.
         *
-        * Inject measured irq/timestamp to the statistical model
-        * while decrementing the counter because we consume the data
-        * from our circular buffer.
+        * Inject measured irq/timestamp to the pattern prediction
+        * model while decrementing the counter because we consume the
+        * data from our circular buffer.
         */
-       for (i = irqts->count & IRQ_TIMINGS_MASK,
-                    irqts->count = min(IRQ_TIMINGS_SIZE, irqts->count);
-            irqts->count > 0; irqts->count--, i = (i + 1) & IRQ_TIMINGS_MASK) {
 
-               irq = irq_timing_decode(irqts->values[i], &ts);
+       i = (irqts->count & IRQ_TIMINGS_MASK) - 1;
+       irqts->count = min(IRQ_TIMINGS_SIZE, irqts->count);
 
+       for (; irqts->count > 0; irqts->count--, i = (i + 1) & IRQ_TIMINGS_MASK) {
+               irq = irq_timing_decode(irqts->values[i], &ts);
                s = idr_find(&irqt_stats, irq);
-               if (s) {
-                       irqs = this_cpu_ptr(s);
-                       irqs_update(irqs, ts);
-               }
+               if (s)
+                       irq_timings_store(irq, this_cpu_ptr(s), ts);
        }
 
        /*
@@ -294,26 +512,12 @@ u64 irq_timings_next_event(u64 now)
 
                irqs = this_cpu_ptr(s);
 
-               if (!irqs->valid)
-                       continue;
+               ts = __irq_timings_next_event(irqs, i, now);
+               if (ts <= now)
+                       return now;
 
-               if (irqs->next_evt <= now) {
-                       irq = i;
-                       next_evt = now;
-
-                       /*
-                        * This interrupt mustn't use in the future
-                        * until new events occur and update the
-                        * statistics.
-                        */
-                       irqs->valid = 0;
-                       break;
-               }
-
-               if (irqs->next_evt < next_evt) {
-                       irq = i;
-                       next_evt = irqs->next_evt;
-               }
+               if (ts < next_evt)
+                       next_evt = ts;
        }
 
        return next_evt;
index 6b7cdf17ccf890c8c1597eb2a70dea2ace5e46db..73288914ed5e78cc44b596ab5428ac98aceb73b3 100644 (file)
@@ -56,61 +56,70 @@ void __weak arch_irq_work_raise(void)
         */
 }
 
-/*
- * Enqueue the irq_work @work on @cpu unless it's already pending
- * somewhere.
- *
- * Can be re-enqueued while the callback is still in progress.
- */
-bool irq_work_queue_on(struct irq_work *work, int cpu)
+/* Enqueue on the current CPU; the work must already be claimed and preemption disabled */
+static void __irq_work_queue_local(struct irq_work *work)
 {
-       /* All work should have been flushed before going offline */
-       WARN_ON_ONCE(cpu_is_offline(cpu));
-
-#ifdef CONFIG_SMP
-
-       /* Arch remote IPI send/receive backend aren't NMI safe */
-       WARN_ON_ONCE(in_nmi());
+       /* If the work is "lazy", handle it from next tick if any */
+       if (work->flags & IRQ_WORK_LAZY) {
+               if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+                   tick_nohz_tick_stopped())
+                       arch_irq_work_raise();
+       } else {
+               if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+                       arch_irq_work_raise();
+       }
+}
 
+/* Enqueue the irq work @work on the current CPU */
+bool irq_work_queue(struct irq_work *work)
+{
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;
 
-       if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
-               arch_send_call_function_single_ipi(cpu);
-
-#else /* #ifdef CONFIG_SMP */
-       irq_work_queue(work);
-#endif /* #else #ifdef CONFIG_SMP */
+       /* Queue the entry and raise the IPI if needed. */
+       preempt_disable();
+       __irq_work_queue_local(work);
+       preempt_enable();
 
        return true;
 }
+EXPORT_SYMBOL_GPL(irq_work_queue);
 
-/* Enqueue the irq work @work on the current CPU */
-bool irq_work_queue(struct irq_work *work)
+/*
+ * Enqueue the irq_work @work on @cpu unless it's already pending
+ * somewhere.
+ *
+ * Can be re-enqueued while the callback is still in progress.
+ */
+bool irq_work_queue_on(struct irq_work *work, int cpu)
 {
+#ifndef CONFIG_SMP
+       return irq_work_queue(work);
+
+#else /* CONFIG_SMP: */
+       /* All work should have been flushed before going offline */
+       WARN_ON_ONCE(cpu_is_offline(cpu));
+
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;
 
-       /* Queue the entry and raise the IPI if needed. */
        preempt_disable();
-
-       /* If the work is "lazy", handle it from next tick if any */
-       if (work->flags & IRQ_WORK_LAZY) {
-               if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
-                   tick_nohz_tick_stopped())
-                       arch_irq_work_raise();
+       if (cpu != smp_processor_id()) {
+               /* Arch remote IPI send/receive backends aren't NMI safe */
+               WARN_ON_ONCE(in_nmi());
+               if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+                       arch_send_call_function_single_ipi(cpu);
        } else {
-               if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
-                       arch_irq_work_raise();
+               __irq_work_queue_local(work);
        }
-
        preempt_enable();
 
        return true;
+#endif /* CONFIG_SMP */
 }
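
Only one IPI is raised per burst of queued work because of llist_add()'s return value: it is true only for the enqueue that found the list empty. A self-contained sketch of that pattern, using a simple lock-free push as a stand-in for the kernel's llist (all names here are hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct node { struct node *next; };

    /* Lock-free push; returns true iff the list was empty beforehand,
     * mirroring llist_add()'s return value. */
    static bool push(struct node *n, _Atomic(struct node *) *head)
    {
            struct node *old = atomic_load(head);

            do {
                    n->next = old;
            } while (!atomic_compare_exchange_weak(head, &old, n));

            return old == NULL;
    }

    int main(void)
    {
            _Atomic(struct node *) raised = NULL;
            struct node a, b;

            if (push(&a, &raised))
                    printf("raise IPI\n");  /* fires: list was empty */
            if (push(&b, &raised))
                    printf("raise IPI\n");  /* silent: IPI already pending */
            return 0;
    }
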
-EXPORT_SYMBOL_GPL(irq_work_queue);
+
 
 bool irq_work_needs_cpu(void)
 {
index bad96b476eb6eb13c1d7f1774a1348506d0e5f10..de6efdecc70d0b6e7d49023f009533e90fc785ef 100644 (file)
@@ -202,11 +202,13 @@ void static_key_disable(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_disable);
 
-static void __static_key_slow_dec_cpuslocked(struct static_key *key,
-                                          unsigned long rate_limit,
-                                          struct delayed_work *work)
+static bool static_key_slow_try_dec(struct static_key *key)
 {
-       lockdep_assert_cpus_held();
+       int val;
+
+       val = atomic_fetch_add_unless(&key->enabled, -1, 1);
+       if (val == 1)
+               return false;
 
        /*
         * The negative count check is valid even when a negative
@@ -215,63 +217,70 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key,
         * returns is unbalanced, because all other static_key_slow_inc()
         * instances block while the update is in progress.
         */
-       if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
-               WARN(atomic_read(&key->enabled) < 0,
-                    "jump label: negative count!\n");
+       WARN(val < 0, "jump label: negative count!\n");
+       return true;
+}
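
static_key_slow_try_dec() leans on atomic_fetch_add_unless(): decrement the count unless it is exactly 1, so the 1 -> 0 transition, which must patch code under the jump label mutex, never happens on this fast path. A user-space approximation of the primitive with C11 atomics (the helper names are made up):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Add @a to *v unless *v == u; return the old value either way.
     * Stand-in for the kernel's atomic_fetch_add_unless(). */
    static int fetch_add_unless(_Atomic int *v, int a, int u)
    {
            int old = atomic_load(v);

            do {
                    if (old == u)
                            break;
            } while (!atomic_compare_exchange_weak(v, &old, old + a));

            return old;
    }

    /* Fast path: drop one reference unless it is the last one. */
    static bool slow_try_dec(_Atomic int *enabled)
    {
            return fetch_add_unless(enabled, -1, 1) != 1;
    }
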
+
+static void __static_key_slow_dec_cpuslocked(struct static_key *key)
+{
+       lockdep_assert_cpus_held();
+
+       if (static_key_slow_try_dec(key))
                return;
-       }
 
-       if (rate_limit) {
-               atomic_inc(&key->enabled);
-               schedule_delayed_work(work, rate_limit);
-       } else {
+       jump_label_lock();
+       if (atomic_dec_and_test(&key->enabled))
                jump_label_update(key);
-       }
        jump_label_unlock();
 }
 
-static void __static_key_slow_dec(struct static_key *key,
-                                 unsigned long rate_limit,
-                                 struct delayed_work *work)
+static void __static_key_slow_dec(struct static_key *key)
 {
        cpus_read_lock();
-       __static_key_slow_dec_cpuslocked(key, rate_limit, work);
+       __static_key_slow_dec_cpuslocked(key);
        cpus_read_unlock();
 }
 
-static void jump_label_update_timeout(struct work_struct *work)
+void jump_label_update_timeout(struct work_struct *work)
 {
        struct static_key_deferred *key =
                container_of(work, struct static_key_deferred, work.work);
-       __static_key_slow_dec(&key->key, 0, NULL);
+       __static_key_slow_dec(&key->key);
 }
+EXPORT_SYMBOL_GPL(jump_label_update_timeout);
 
 void static_key_slow_dec(struct static_key *key)
 {
        STATIC_KEY_CHECK_USE(key);
-       __static_key_slow_dec(key, 0, NULL);
+       __static_key_slow_dec(key);
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec);
 
 void static_key_slow_dec_cpuslocked(struct static_key *key)
 {
        STATIC_KEY_CHECK_USE(key);
-       __static_key_slow_dec_cpuslocked(key, 0, NULL);
+       __static_key_slow_dec_cpuslocked(key);
 }
 
-void static_key_slow_dec_deferred(struct static_key_deferred *key)
+void __static_key_slow_dec_deferred(struct static_key *key,
+                                   struct delayed_work *work,
+                                   unsigned long timeout)
 {
        STATIC_KEY_CHECK_USE(key);
-       __static_key_slow_dec(&key->key, key->timeout, &key->work);
+
+       if (static_key_slow_try_dec(key))
+               return;
+
+       schedule_delayed_work(work, timeout);
 }
-EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
+EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);
 
-void static_key_deferred_flush(struct static_key_deferred *key)
+void __static_key_deferred_flush(void *key, struct delayed_work *work)
 {
        STATIC_KEY_CHECK_USE(key);
-       flush_delayed_work(&key->work);
+       flush_delayed_work(work);
 }
-EXPORT_SYMBOL_GPL(static_key_deferred_flush);
+EXPORT_SYMBOL_GPL(__static_key_deferred_flush);
 
 void jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
index d7140447be75b809c5f5979d7be280c2f46de1c2..fd5c95ff9251fc2c785547b8ad968d18cfda8e73 100644 (file)
@@ -1150,7 +1150,7 @@ int kernel_kexec(void)
                error = dpm_suspend_end(PMSG_FREEZE);
                if (error)
                        goto Resume_devices;
-               error = disable_nonboot_cpus();
+               error = suspend_disable_secondary_cpus();
                if (error)
                        goto Enable_cpus;
                local_irq_disable();
@@ -1183,7 +1183,7 @@ int kernel_kexec(void)
  Enable_irqs:
                local_irq_enable();
  Enable_cpus:
-               enable_nonboot_cpus();
+               suspend_enable_secondary_cpus();
                dpm_resume_start(PMSG_RESTORE);
  Resume_devices:
                dpm_resume_end(PMSG_RESTORE);
index c83e547271312e9f32ed5447fdb9eef1b8e2c2e7..b1ea30a5540e9e1af5f1c86e5590195b6f00bc9d 100644 (file)
@@ -709,7 +709,6 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
 static int reuse_unused_kprobe(struct kprobe *ap)
 {
        struct optimized_kprobe *op;
-       int ret;
 
        /*
         * Unused kprobe MUST be on the way of delayed unoptimizing (means
@@ -720,9 +719,8 @@ static int reuse_unused_kprobe(struct kprobe *ap)
        /* Enable the probe again */
        ap->flags &= ~KPROBE_FLAG_DISABLED;
        /* Optimize it again (remove from op->list) */
-       ret = kprobe_optready(ap);
-       if (ret)
-               return ret;
+       if (!kprobe_optready(ap))
+               return -EINVAL;
 
        optimize_kprobe(ap);
        return 0;
index 96b4179cee6a76db0c63d631a8e669399d94d179..99a5b5f46dc5d99080ef7b1320c7c59106f580c4 100644 (file)
@@ -120,8 +120,8 @@ account_global_scheduler_latency(struct task_struct *tsk,
                                break;
                        }
 
-                       /* 0 and ULONG_MAX entries mean end of backtrace: */
-                       if (record == 0 || record == ULONG_MAX)
+                       /* 0 entry marks end of backtrace: */
+                       if (!record)
                                break;
                }
                if (same) {
@@ -141,20 +141,6 @@ account_global_scheduler_latency(struct task_struct *tsk,
        memcpy(&latency_record[i], lat, sizeof(struct latency_record));
 }
 
-/*
- * Iterator to store a backtrace into a latency record entry
- */
-static inline void store_stacktrace(struct task_struct *tsk,
-                                       struct latency_record *lat)
-{
-       struct stack_trace trace;
-
-       memset(&trace, 0, sizeof(trace));
-       trace.max_entries = LT_BACKTRACEDEPTH;
-       trace.entries = &lat->backtrace[0];
-       save_stack_trace_tsk(tsk, &trace);
-}
-
 /**
  * __account_scheduler_latency - record an occurred latency
  * @tsk - the task struct of the task hitting the latency
@@ -191,7 +177,8 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
        lat.count = 1;
        lat.time = usecs;
        lat.max = usecs;
-       store_stacktrace(tsk, &lat);
+
+       stack_trace_save_tsk(tsk, lat.backtrace, LT_BACKTRACEDEPTH, 0);
 
        raw_spin_lock_irqsave(&latency_lock, flags);
 
@@ -210,8 +197,8 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
                                break;
                        }
 
-                       /* 0 and ULONG_MAX entries mean end of backtrace: */
-                       if (record == 0 || record == ULONG_MAX)
+                       /* 0 entry is end of backtrace */
+                       if (!record)
                                break;
                }
                if (same) {
@@ -252,10 +239,10 @@ static int lstats_show(struct seq_file *m, void *v)
                                   lr->count, lr->time, lr->max);
                        for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
                                unsigned long bt = lr->backtrace[q];
+
                                if (!bt)
                                        break;
-                               if (bt == ULONG_MAX)
-                                       break;
+
                                seq_printf(m, " %ps", (void *)bt);
                        }
                        seq_puts(m, "\n");
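
This hunk migrates latencytop from the old struct stack_trace descriptor, whose backtraces had to be terminated with 0 (or, on some arches, ULONG_MAX), to the new stack_trace_save_tsk() helper, which takes a plain array plus a skip count and returns the number of entries stored. In sketch form, with hypothetical locals:

    unsigned long entries[LT_BACKTRACEDEPTH];
    unsigned int nr, i;

    /* New style: pass the buffer directly, get the entry count back;
     * no sentinel value needs to be stripped afterwards. */
    nr = stack_trace_save_tsk(tsk, entries, LT_BACKTRACEDEPTH, 0);

    for (i = 0; i < nr; i++)
            pr_info(" %ps\n", (void *)entries[i]);
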
index 9c89ae8b337a253c701885352b246fbe3661c4dd..c53370d596be6b5c271eeb7f3ccdefde94450139 100644 (file)
@@ -202,15 +202,15 @@ void klp_update_patch_state(struct task_struct *task)
  * Determine whether the given stack trace includes any references to a
  * to-be-patched or to-be-unpatched function.
  */
-static int klp_check_stack_func(struct klp_func *func,
-                               struct stack_trace *trace)
+static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
+                               unsigned int nr_entries)
 {
        unsigned long func_addr, func_size, address;
        struct klp_ops *ops;
        int i;
 
-       for (i = 0; i < trace->nr_entries; i++) {
-               address = trace->entries[i];
+       for (i = 0; i < nr_entries; i++) {
+               address = entries[i];
 
                if (klp_target_state == KLP_UNPATCHED) {
                         /*
@@ -254,29 +254,25 @@ static int klp_check_stack_func(struct klp_func *func,
 static int klp_check_stack(struct task_struct *task, char *err_buf)
 {
        static unsigned long entries[MAX_STACK_ENTRIES];
-       struct stack_trace trace;
        struct klp_object *obj;
        struct klp_func *func;
-       int ret;
+       int ret, nr_entries;
 
-       trace.skip = 0;
-       trace.nr_entries = 0;
-       trace.max_entries = MAX_STACK_ENTRIES;
-       trace.entries = entries;
-       ret = save_stack_trace_tsk_reliable(task, &trace);
+       ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
        WARN_ON_ONCE(ret == -ENOSYS);
-       if (ret) {
+       if (ret < 0) {
                snprintf(err_buf, STACK_ERR_BUF_SIZE,
                         "%s: %s:%d has an unreliable stack\n",
                         __func__, task->comm, task->pid);
                return ret;
        }
+       nr_entries = ret;
 
        klp_for_each_object(klp_transition_patch, obj) {
                if (!obj->patched)
                        continue;
                klp_for_each_func(obj, func) {
-                       ret = klp_check_stack_func(func, &trace);
+                       ret = klp_check_stack_func(func, entries, nr_entries);
                        if (ret) {
                                snprintf(err_buf, STACK_ERR_BUF_SIZE,
                                         "%s: %s:%d is sleeping on function %s\n",
index 392c7f23af7651a52dda141740d890b77114cab6..6fe2f333aecb5a541eb0f78b21263316974161e5 100644 (file)
@@ -3,7 +3,7 @@
 # and is generally not a function of system call inputs.
 KCOV_INSTRUMENT                := n
 
-obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
+obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o rwsem-xadd.o
 
 ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
@@ -25,8 +25,7 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
-obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
-obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
 obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
 obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
 obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o
+obj-$(CONFIG_LOCK_EVENT_COUNTS) += lock_events.o
diff --git a/kernel/locking/lock_events.c b/kernel/locking/lock_events.c
new file mode 100644 (file)
index 0000000..fa2c2f9
--- /dev/null
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Authors: Waiman Long <waiman.long@hpe.com>
+ */
+
+/*
+ * Collect locking event counts
+ */
+#include <linux/debugfs.h>
+#include <linux/sched.h>
+#include <linux/sched/clock.h>
+#include <linux/fs.h>
+
+#include "lock_events.h"
+
+#undef  LOCK_EVENT
+#define LOCK_EVENT(name)       [LOCKEVENT_ ## name] = #name,
+
+#define LOCK_EVENTS_DIR                "lock_event_counts"
+
+/*
+ * When CONFIG_LOCK_EVENT_COUNTS is enabled, event counts of different
+ * types of locks will be reported under the <debugfs>/lock_event_counts/
+ * directory. See lock_events_list.h for the list of available locking
+ * events.
+ *
+ * Writing to the special ".reset_counts" file will reset all the above
+ * locking event counts. This is a very slow operation and so should not
+ * be done frequently.
+ *
+ * These event counts are implemented as per-cpu variables which are
+ * summed up whenever the corresponding debugfs files are read. This
+ * minimizes the added overhead, making the counts usable even in a
+ * production environment.
+ */
+static const char * const lockevent_names[lockevent_num + 1] = {
+
+#include "lock_events_list.h"
+
+       [LOCKEVENT_reset_cnts] = ".reset_counts",
+};
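
This is the classic x-macro pattern: lock_events_list.h is included twice with two different definitions of LOCK_EVENT(), once (in lock_events.h, further below) to build the enum of counter IDs and once here to build a name table that cannot drift out of sync with it. Reduced to its essentials:

    /* lock_events_list.h contains lines like: LOCK_EVENT(rwsem_wlock) */

    /* First expansion: the enum of counter IDs. */
    #define LOCK_EVENT(name) LOCKEVENT_ ## name,
    enum lock_events {
    #include "lock_events_list.h"
            lockevent_num,                  /* total number of events */
    };
    #undef LOCK_EVENT

    /* Second expansion: the name table, indexed by counter ID. */
    #define LOCK_EVENT(name) [LOCKEVENT_ ## name] = #name,
    static const char * const lockevent_names[] = {
    #include "lock_events_list.h"
    };
    #undef LOCK_EVENT
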
+
+/*
+ * Per-cpu counts
+ */
+DEFINE_PER_CPU(unsigned long, lockevents[lockevent_num]);
+
+/*
+ * The lockevent_read() function can be overridden.
+ */
+ssize_t __weak lockevent_read(struct file *file, char __user *user_buf,
+                             size_t count, loff_t *ppos)
+{
+       char buf[64];
+       int cpu, id, len;
+       u64 sum = 0;
+
+       /*
+        * Get the counter ID stored in file->f_inode->i_private
+        */
+       id = (long)file_inode(file)->i_private;
+
+       if (id >= lockevent_num)
+               return -EBADF;
+
+       for_each_possible_cpu(cpu)
+               sum += per_cpu(lockevents[id], cpu);
+       len = snprintf(buf, sizeof(buf) - 1, "%llu\n", sum);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+/*
+ * Function to handle write requests
+ *
+ * When the counter ID is LOCKEVENT_reset_cnts, reset all the counts.
+ */
+static ssize_t lockevent_write(struct file *file, const char __user *user_buf,
+                          size_t count, loff_t *ppos)
+{
+       int cpu;
+
+       /*
+        * Get the counter ID stored in file->f_inode->i_private
+        */
+       if ((long)file_inode(file)->i_private != LOCKEVENT_reset_cnts)
+               return count;
+
+       for_each_possible_cpu(cpu) {
+               int i;
+               unsigned long *ptr = per_cpu_ptr(lockevents, cpu);
+
+               for (i = 0; i < lockevent_num; i++)
+                       WRITE_ONCE(ptr[i], 0);
+       }
+       return count;
+}
+
+/*
+ * Debugfs data structures
+ */
+static const struct file_operations fops_lockevent = {
+       .read = lockevent_read,
+       .write = lockevent_write,
+       .llseek = default_llseek,
+};
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#include <asm/paravirt.h>
+
+static bool __init skip_lockevent(const char *name)
+{
+       static int pv_on __initdata = -1;
+
+       if (pv_on < 0)
+               pv_on = !pv_is_native_spin_unlock();
+       /*
+        * Skip PV qspinlock events on bare metal.
+        */
+       if (!pv_on && !memcmp(name, "pv_", 3))
+               return true;
+       return false;
+}
+#else
+static inline bool skip_lockevent(const char *name)
+{
+       return false;
+}
+#endif
+
+/*
+ * Initialize debugfs for the locking event counts.
+ */
+static int __init init_lockevent_counts(void)
+{
+       struct dentry *d_counts = debugfs_create_dir(LOCK_EVENTS_DIR, NULL);
+       int i;
+
+       if (!d_counts)
+               goto out;
+
+       /*
+        * Create the debugfs files
+        *
+        * As reading from and writing to the stat files can be slow, only
+        * root is allowed to do the read/write to limit the impact on
+        * system performance.
+        */
+       for (i = 0; i < lockevent_num; i++) {
+               if (skip_lockevent(lockevent_names[i]))
+                       continue;
+               if (!debugfs_create_file(lockevent_names[i], 0400, d_counts,
+                                        (void *)(long)i, &fops_lockevent))
+                       goto fail_undo;
+       }
+
+       if (!debugfs_create_file(lockevent_names[LOCKEVENT_reset_cnts], 0200,
+                                d_counts, (void *)(long)LOCKEVENT_reset_cnts,
+                                &fops_lockevent))
+               goto fail_undo;
+
+       return 0;
+fail_undo:
+       debugfs_remove_recursive(d_counts);
+out:
+       pr_warn("Could not create '%s' debugfs entries\n", LOCK_EVENTS_DIR);
+       return -ENOMEM;
+}
+fs_initcall(init_lockevent_counts);
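
In practice, assuming debugfs is mounted at the usual /sys/kernel/debug, each counter then shows up as a root-readable file such as /sys/kernel/debug/lock_event_counts/rwsem_wlock, and writing anything to the write-only .reset_counts file in the same directory zeroes every counter.
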
diff --git a/kernel/locking/lock_events.h b/kernel/locking/lock_events.h
new file mode 100644 (file)
index 0000000..feb1acc
--- /dev/null
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Authors: Waiman Long <longman@redhat.com>
+ */
+
+#ifndef __LOCKING_LOCK_EVENTS_H
+#define __LOCKING_LOCK_EVENTS_H
+
+enum lock_events {
+
+#include "lock_events_list.h"
+
+       lockevent_num,  /* Total number of lock event counts */
+       LOCKEVENT_reset_cnts = lockevent_num,
+};
+
+#ifdef CONFIG_LOCK_EVENT_COUNTS
+/*
+ * Per-cpu counters
+ */
+DECLARE_PER_CPU(unsigned long, lockevents[lockevent_num]);
+
+/*
+ * Increment the lock event counters
+ */
+static inline void __lockevent_inc(enum lock_events event, bool cond)
+{
+       if (cond)
+               __this_cpu_inc(lockevents[event]);
+}
+
+#define lockevent_inc(ev)        __lockevent_inc(LOCKEVENT_ ##ev, true)
+#define lockevent_cond_inc(ev, c) __lockevent_inc(LOCKEVENT_ ##ev, c)
+
+static inline void __lockevent_add(enum lock_events event, int inc)
+{
+       __this_cpu_add(lockevents[event], inc);
+}
+
+#define lockevent_add(ev, c)   __lockevent_add(LOCKEVENT_ ##ev, c)
+
+#else  /* CONFIG_LOCK_EVENT_COUNTS */
+
+#define lockevent_inc(ev)
+#define lockevent_add(ev, c)
+#define lockevent_cond_inc(ev, c)
+
+#endif /* CONFIG_LOCK_EVENT_COUNTS */
+#endif /* __LOCKING_LOCK_EVENTS_H */
diff --git a/kernel/locking/lock_events_list.h b/kernel/locking/lock_events_list.h
new file mode 100644 (file)
index 0000000..ad7668c
--- /dev/null
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Authors: Waiman Long <longman@redhat.com>
+ */
+
+#ifndef LOCK_EVENT
+#define LOCK_EVENT(name)       LOCKEVENT_ ## name,
+#endif
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+/*
+ * Locking events for PV qspinlock.
+ */
+LOCK_EVENT(pv_hash_hops)       /* Average # of hops per hashing operation */
+LOCK_EVENT(pv_kick_unlock)     /* # of vCPU kicks issued at unlock time   */
+LOCK_EVENT(pv_kick_wake)       /* # of vCPU kicks for pv_latency_wake     */
+LOCK_EVENT(pv_latency_kick)    /* Average latency (ns) of vCPU kick       */
+LOCK_EVENT(pv_latency_wake)    /* Average latency (ns) of kick-to-wakeup  */
+LOCK_EVENT(pv_lock_stealing)   /* # of lock stealing operations           */
+LOCK_EVENT(pv_spurious_wakeup) /* # of spurious wakeups in non-head vCPUs */
+LOCK_EVENT(pv_wait_again)      /* # of wait's after queue head vCPU kick  */
+LOCK_EVENT(pv_wait_early)      /* # of early vCPU wait's                  */
+LOCK_EVENT(pv_wait_head)       /* # of vCPU wait's at the queue head      */
+LOCK_EVENT(pv_wait_node)       /* # of vCPU wait's at non-head queue node */
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+/*
+ * Locking events for qspinlock
+ *
+ * Subtracting lock_use_node[234] from lock_slowpath will give you
+ * lock_use_node1.
+ */
+LOCK_EVENT(lock_pending)       /* # of locking ops via pending code         */
+LOCK_EVENT(lock_slowpath)      /* # of locking ops via MCS lock queue       */
+LOCK_EVENT(lock_use_node2)     /* # of locking ops that use 2nd percpu node */
+LOCK_EVENT(lock_use_node3)     /* # of locking ops that use 3rd percpu node */
+LOCK_EVENT(lock_use_node4)     /* # of locking ops that use 4th percpu node */
+LOCK_EVENT(lock_no_node)       /* # of locking ops w/o using percpu node    */
+#endif /* CONFIG_QUEUED_SPINLOCKS */
+
+/*
+ * Locking events for rwsem
+ */
+LOCK_EVENT(rwsem_sleep_reader) /* # of reader sleeps                   */
+LOCK_EVENT(rwsem_sleep_writer) /* # of writer sleeps                   */
+LOCK_EVENT(rwsem_wake_reader)  /* # of reader wakeups                  */
+LOCK_EVENT(rwsem_wake_writer)  /* # of writer wakeups                  */
+LOCK_EVENT(rwsem_opt_wlock)    /* # of write locks opt-spin acquired   */
+LOCK_EVENT(rwsem_opt_fail)     /* # of failed opt-spinnings            */
+LOCK_EVENT(rwsem_rlock)                /* # of read locks acquired             */
+LOCK_EVENT(rwsem_rlock_fast)   /* # of fast read locks acquired        */
+LOCK_EVENT(rwsem_rlock_fail)   /* # of failed read lock acquisitions   */
+LOCK_EVENT(rwsem_rtrylock)     /* # of read trylock calls              */
+LOCK_EVENT(rwsem_wlock)                /* # of write locks acquired            */
+LOCK_EVENT(rwsem_wlock_fail)   /* # of failed write lock acquisitions  */
+LOCK_EVENT(rwsem_wtrylock)     /* # of write trylock calls             */
index 34cdcbedda492b84cb610af67cb11113ea04065d..27b992fe8cecb25a204104b7ea3461fbb456fe79 100644 (file)
@@ -434,29 +434,14 @@ static void print_lockdep_off(const char *bug_msg)
 #endif
 }
 
-static int save_trace(struct stack_trace *trace)
+static int save_trace(struct lock_trace *trace)
 {
-       trace->nr_entries = 0;
-       trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
-       trace->entries = stack_trace + nr_stack_trace_entries;
-
-       trace->skip = 3;
-
-       save_stack_trace(trace);
-
-       /*
-        * Some daft arches put -1 at the end to indicate its a full trace.
-        *
-        * <rant> this is buggy anyway, since it takes a whole extra entry so a
-        * complete trace that maxes out the entries provided will be reported
-        * as incomplete, friggin useless </rant>
-        */
-       if (trace->nr_entries != 0 &&
-           trace->entries[trace->nr_entries-1] == ULONG_MAX)
-               trace->nr_entries--;
-
-       trace->max_entries = trace->nr_entries;
+       unsigned long *entries = stack_trace + nr_stack_trace_entries;
+       unsigned int max_entries;
 
+       trace->offset = nr_stack_trace_entries;
+       max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
+       trace->nr_entries = stack_trace_save(entries, max_entries, 3);
        nr_stack_trace_entries += trace->nr_entries;
 
        if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
@@ -516,11 +501,11 @@ static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
 {
        char c = '.';
 
-       if (class->usage_mask & lock_flag(bit + 2))
+       if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK))
                c = '+';
        if (class->usage_mask & lock_flag(bit)) {
                c = '-';
-               if (class->usage_mask & lock_flag(bit + 2))
+               if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK))
                        c = '?';
        }
 
@@ -1207,7 +1192,7 @@ static struct lock_list *alloc_list_entry(void)
 static int add_lock_to_list(struct lock_class *this,
                            struct lock_class *links_to, struct list_head *head,
                            unsigned long ip, int distance,
-                           struct stack_trace *trace)
+                           struct lock_trace *trace)
 {
        struct lock_list *entry;
        /*
@@ -1426,6 +1411,13 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
  * checking.
  */
 
+static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+{
+       unsigned long *entries = stack_trace + trace->offset;
+
+       stack_trace_print(entries, trace->nr_entries, spaces);
+}
+
 /*
  * Print a dependency chain entry (this is only done when a deadlock
  * has been detected):
@@ -1438,8 +1430,7 @@ print_circular_bug_entry(struct lock_list *target, int depth)
        printk("\n-> #%u", depth);
        print_lock_name(target->class);
        printk(KERN_CONT ":\n");
-       print_stack_trace(&target->trace, 6);
-
+       print_lock_trace(&target->trace, 6);
        return 0;
 }
 
@@ -1533,10 +1524,9 @@ static inline int class_equal(struct lock_list *entry, void *data)
 }
 
 static noinline int print_circular_bug(struct lock_list *this,
-                               struct lock_list *target,
-                               struct held_lock *check_src,
-                               struct held_lock *check_tgt,
-                               struct stack_trace *trace)
+                                      struct lock_list *target,
+                                      struct held_lock *check_src,
+                                      struct held_lock *check_tgt)
 {
        struct task_struct *curr = current;
        struct lock_list *parent;
@@ -1676,19 +1666,25 @@ check_redundant(struct lock_list *root, struct lock_class *target,
 }
 
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+
+static inline int usage_accumulate(struct lock_list *entry, void *mask)
+{
+       *(unsigned long *)mask |= entry->class->usage_mask;
+
+       return 0;
+}
+
 /*
  * Forwards and backwards subgraph searching, for the purposes of
  * proving that two subgraphs can be connected by a new dependency
  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
  */
 
-static inline int usage_match(struct lock_list *entry, void *bit)
+static inline int usage_match(struct lock_list *entry, void *mask)
 {
-       return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
+       return entry->class->usage_mask & *(unsigned long *)mask;
 }
 
-
-
 /*
  * Find a node in the forwards-direction dependency sub-graph starting
  * at @root->class that matches @bit.
@@ -1700,14 +1696,14 @@ static inline int usage_match(struct lock_list *entry, void *bit)
  * Return <0 on error.
  */
 static int
-find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
+find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
                        struct lock_list **target_entry)
 {
        int result;
 
        debug_atomic_inc(nr_find_usage_forwards_checks);
 
-       result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
+       result = __bfs_forwards(root, &usage_mask, usage_match, target_entry);
 
        return result;
 }
@@ -1723,14 +1719,14 @@ find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
  * Return <0 on error.
  */
 static int
-find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
+find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
                        struct lock_list **target_entry)
 {
        int result;
 
        debug_atomic_inc(nr_find_usage_backwards_checks);
 
-       result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
+       result = __bfs_backwards(root, &usage_mask, usage_match, target_entry);
 
        return result;
 }
@@ -1752,7 +1748,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 
                        len += printk("%*s   %s", depth, "", usage_str[bit]);
                        len += printk(KERN_CONT " at:\n");
-                       print_stack_trace(class->usage_traces + bit, len);
+                       print_lock_trace(class->usage_traces + bit, len);
                }
        }
        printk("%*s }\n", depth, "");
@@ -1777,7 +1773,7 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
        do {
                print_lock_class_header(entry->class, depth);
                printk("%*s ... acquired at:\n", depth, "");
-               print_stack_trace(&entry->trace, 2);
+               print_lock_trace(&entry->trace, 2);
                printk("\n");
 
                if (depth == 0 && (entry != root)) {
@@ -1890,14 +1886,14 @@ print_bad_irq_dependency(struct task_struct *curr,
        print_lock_name(backwards_entry->class);
        pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
 
-       print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
+       print_lock_trace(backwards_entry->class->usage_traces + bit1, 1);
 
        pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
        print_lock_name(forwards_entry->class);
        pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
        pr_warn("...");
 
-       print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
+       print_lock_trace(forwards_entry->class->usage_traces + bit2, 1);
 
        pr_warn("\nother info that might help us debug this:\n\n");
        print_irq_lock_scenario(backwards_entry, forwards_entry,
@@ -1922,39 +1918,6 @@ print_bad_irq_dependency(struct task_struct *curr,
        return 0;
 }
 
-static int
-check_usage(struct task_struct *curr, struct held_lock *prev,
-           struct held_lock *next, enum lock_usage_bit bit_backwards,
-           enum lock_usage_bit bit_forwards, const char *irqclass)
-{
-       int ret;
-       struct lock_list this, that;
-       struct lock_list *uninitialized_var(target_entry);
-       struct lock_list *uninitialized_var(target_entry1);
-
-       this.parent = NULL;
-
-       this.class = hlock_class(prev);
-       ret = find_usage_backwards(&this, bit_backwards, &target_entry);
-       if (ret < 0)
-               return print_bfs_bug(ret);
-       if (ret == 1)
-               return ret;
-
-       that.parent = NULL;
-       that.class = hlock_class(next);
-       ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
-       if (ret < 0)
-               return print_bfs_bug(ret);
-       if (ret == 1)
-               return ret;
-
-       return print_bad_irq_dependency(curr, &this, &that,
-                       target_entry, target_entry1,
-                       prev, next,
-                       bit_backwards, bit_forwards, irqclass);
-}
-
 static const char *state_names[] = {
 #define LOCKDEP_STATE(__STATE) \
        __stringify(__STATE),
@@ -1971,9 +1934,19 @@ static const char *state_rnames[] = {
 
 static inline const char *state_name(enum lock_usage_bit bit)
 {
-       return (bit & LOCK_USAGE_READ_MASK) ? state_rnames[bit >> 2] : state_names[bit >> 2];
+       if (bit & LOCK_USAGE_READ_MASK)
+               return state_rnames[bit >> LOCK_USAGE_DIR_MASK];
+       else
+               return state_names[bit >> LOCK_USAGE_DIR_MASK];
 }
 
+/*
+ * The bit number is encoded like:
+ *
+ *  bit0: 0 exclusive, 1 read lock
+ *  bit1: 0 used in irq, 1 irq enabled
+ *  bit2-n: state
+ */
 static int exclusive_bit(int new_bit)
 {
        int state = new_bit & LOCK_USAGE_STATE_MASK;
@@ -1985,45 +1958,160 @@ static int exclusive_bit(int new_bit)
        return state | (dir ^ LOCK_USAGE_DIR_MASK);
 }
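
With the encoding documented above (bit0 = read, bit1 = direction, bit2+ = state), exclusive_bit() keeps the state bits, flips the direction bit, and drops the read bit. A small worked sketch with the mask values that encoding implies (assumed here rather than quoted from the header):

    #define USAGE_READ_MASK 1  /* bit0; assumed value of LOCK_USAGE_READ_MASK */
    #define USAGE_DIR_MASK  2  /* bit1; assumed value of LOCK_USAGE_DIR_MASK  */

    static int exclusive_bit_demo(int bit)
    {
            int state = bit & ~(USAGE_READ_MASK | USAGE_DIR_MASK);
            int dir = bit & USAGE_DIR_MASK;

            /* keep the state, flip the direction, strip the read bit */
            return state | (dir ^ USAGE_DIR_MASK);
    }

For example, a USED_IN_HARDIRQ_READ bit (state = HARDIRQ, dir = 0, read = 1) maps to ENABLED_HARDIRQ (same state, dir = 1, read = 0): taking a lock that has been read-held in hardirq context while hardirqs are enabled is exactly the conflict check_irq_usage() hunts for.
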
 
+/*
+ * Observe that when given a bitmask where each bitnr is encoded as above, a
+ * right shift of the mask decrements each individual bitnr by 1 and,
+ * conversely, a left shift increments each bitnr by 1.
+ *
+ * So for all bits whose number has LOCK_ENABLED_* set (bitnr1 == 1), we can
+ * create the mask with those bit numbers using LOCK_USED_IN_* (bitnr1 == 0)
+ * instead by subtracting 2 from the bit number, or shifting the mask right by 2.
+ *
+ * Similarly, bitnr1 == 0 becomes bitnr1 == 1 by adding 2, or shifting left 2.
+ *
+ * So split the mask (note that LOCKF_ENABLED_IRQ_ALL|LOCKF_USED_IN_IRQ_ALL is
+ * all bits set) and recompose with bitnr1 flipped.
+ */
+static unsigned long invert_dir_mask(unsigned long mask)
+{
+       unsigned long excl = 0;
+
+       /* Invert dir */
+       excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK;
+       excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK;
+
+       return excl;
+}
+
+/*
+ * As above, we clear bitnr0 (LOCK_*_READ off) with bitmask ops. First, for all
+ * bits with bitnr0 set (LOCK_*_READ), add those with bitnr0 cleared (LOCK_*).
+ * And then mask out all bitnr0.
+ */
+static unsigned long exclusive_mask(unsigned long mask)
+{
+       unsigned long excl = invert_dir_mask(mask);
+
+       /* Strip read */
+       excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
+       excl &= ~LOCKF_IRQ_READ;
+
+       return excl;
+}
+
+/*
+ * Retrieve the _possible_ original mask to which @mask is
+ * exclusive, i.e. this is the opposite of exclusive_mask().
+ * Note that 2 possible original bits can match an exclusive
+ * bit: one has LOCK_USAGE_READ_MASK set, the other has it
+ * cleared. So both are returned for each exclusive bit.
+ */
+static unsigned long original_mask(unsigned long mask)
+{
+       unsigned long excl = invert_dir_mask(mask);
+
+       /* Include read in existing usages */
+       excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
+
+       return excl;
+}
+
+/*
+ * Find the first pair of bit match between an original
+ * usage mask and an exclusive usage mask.
+ */
+static int find_exclusive_match(unsigned long mask,
+                               unsigned long excl_mask,
+                               enum lock_usage_bit *bitp,
+                               enum lock_usage_bit *excl_bitp)
+{
+       int bit, excl;
+
+       for_each_set_bit(bit, &mask, LOCK_USED) {
+               excl = exclusive_bit(bit);
+               if (excl_mask & lock_flag(excl)) {
+                       *bitp = bit;
+                       *excl_bitp = excl;
+                       return 0;
+               }
+       }
+       return -1;
+}
+
+/*
+ * Prove that the new dependency does not connect a hardirq-safe(-read)
+ * lock with a hardirq-unsafe lock - to achieve this we search
+ * the backwards-subgraph starting at <prev>, and the
+ * forwards-subgraph starting at <next>:
+ */
 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
-                          struct held_lock *next, enum lock_usage_bit bit)
+                          struct held_lock *next)
 {
+       unsigned long usage_mask = 0, forward_mask, backward_mask;
+       enum lock_usage_bit forward_bit = 0, backward_bit = 0;
+       struct lock_list *uninitialized_var(target_entry1);
+       struct lock_list *uninitialized_var(target_entry);
+       struct lock_list this, that;
+       int ret;
+
        /*
-        * Prove that the new dependency does not connect a hardirq-safe
-        * lock with a hardirq-unsafe lock - to achieve this we search
-        * the backwards-subgraph starting at <prev>, and the
-        * forwards-subgraph starting at <next>:
+        * Step 1: gather all hard/soft IRQs usages backward in an
+        * accumulated usage mask.
         */
-       if (!check_usage(curr, prev, next, bit,
-                          exclusive_bit(bit), state_name(bit)))
-               return 0;
+       this.parent = NULL;
+       this.class = hlock_class(prev);
+
+       ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, NULL);
+       if (ret < 0)
+               return print_bfs_bug(ret);
 
-       bit++; /* _READ */
+       usage_mask &= LOCKF_USED_IN_IRQ_ALL;
+       if (!usage_mask)
+               return 1;
 
        /*
-        * Prove that the new dependency does not connect a hardirq-safe-read
-        * lock with a hardirq-unsafe lock - to achieve this we search
-        * the backwards-subgraph starting at <prev>, and the
-        * forwards-subgraph starting at <next>:
+        * Step 2: find exclusive uses forward that match the previous
+        * backward accumulated mask.
         */
-       if (!check_usage(curr, prev, next, bit,
-                          exclusive_bit(bit), state_name(bit)))
-               return 0;
+       forward_mask = exclusive_mask(usage_mask);
 
-       return 1;
-}
+       that.parent = NULL;
+       that.class = hlock_class(next);
 
-static int
-check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
-               struct held_lock *next)
-{
-#define LOCKDEP_STATE(__STATE)                                         \
-       if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
-               return 0;
-#include "lockdep_states.h"
-#undef LOCKDEP_STATE
+       ret = find_usage_forwards(&that, forward_mask, &target_entry1);
+       if (ret < 0)
+               return print_bfs_bug(ret);
+       if (ret == 1)
+               return ret;
 
-       return 1;
+       /*
+        * Step 3: we found a bad match! Now retrieve a lock from the backward
+        * list whose usage mask matches the exclusive usage mask from the
+        * lock found on the forward list.
+        */
+       backward_mask = original_mask(target_entry1->class->usage_mask);
+
+       ret = find_usage_backwards(&this, backward_mask, &target_entry);
+       if (ret < 0)
+               return print_bfs_bug(ret);
+       if (DEBUG_LOCKS_WARN_ON(ret == 1))
+               return 1;
+
+       /*
+        * Step 4: narrow down to a pair of incompatible usage bits
+        * and report it.
+        */
+       ret = find_exclusive_match(target_entry->class->usage_mask,
+                                  target_entry1->class->usage_mask,
+                                  &backward_bit, &forward_bit);
+       if (DEBUG_LOCKS_WARN_ON(ret == -1))
+               return 1;
+
+       return print_bad_irq_dependency(curr, &this, &that,
+                       target_entry, target_entry1,
+                       prev, next,
+                       backward_bit, forward_bit,
+                       state_name(backward_bit));
 }
 
 static void inc_chains(void)
@@ -2040,9 +2128,8 @@ static void inc_chains(void)
 
 #else
 
-static inline int
-check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
-               struct held_lock *next)
+static inline int check_irq_usage(struct task_struct *curr,
+                                 struct held_lock *prev, struct held_lock *next)
 {
        return 1;
 }
@@ -2170,8 +2257,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-              struct held_lock *next, int distance, struct stack_trace *trace,
-              int (*save)(struct stack_trace *trace))
+              struct held_lock *next, int distance, struct lock_trace *trace)
 {
        struct lock_list *uninitialized_var(target_entry);
        struct lock_list *entry;
@@ -2209,20 +2295,20 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
        this.parent = NULL;
        ret = check_noncircular(&this, hlock_class(prev), &target_entry);
        if (unlikely(!ret)) {
-               if (!trace->entries) {
+               if (!trace->nr_entries) {
                        /*
-                        * If @save fails here, the printing might trigger
-                        * a WARN but because of the !nr_entries it should
-                        * not do bad things.
+                        * If save_trace fails here, the printing might
+                        * trigger a WARN but because of the !nr_entries it
+                        * should not do bad things.
                         */
-                       save(trace);
+                       save_trace(trace);
                }
-               return print_circular_bug(&this, target_entry, next, prev, trace);
+               return print_circular_bug(&this, target_entry, next, prev);
        }
        else if (unlikely(ret < 0))
                return print_bfs_bug(ret);
 
-       if (!check_prev_add_irq(curr, prev, next))
+       if (!check_irq_usage(curr, prev, next))
                return 0;
 
        /*
@@ -2265,7 +2351,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
                return print_bfs_bug(ret);
 
 
-       if (!trace->entries && !save(trace))
+       if (!trace->nr_entries && !save_trace(trace))
                return 0;
 
        /*
@@ -2297,14 +2383,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 static int
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
+       struct lock_trace trace = { .nr_entries = 0 };
        int depth = curr->lockdep_depth;
        struct held_lock *hlock;
-       struct stack_trace trace = {
-               .nr_entries = 0,
-               .max_entries = 0,
-               .entries = NULL,
-               .skip = 0,
-       };
 
        /*
         * Debugging checks.
@@ -2330,7 +2411,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
                 * added:
                 */
                if (hlock->read != 2 && hlock->check) {
-                       int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
+                       int ret = check_prev_add(curr, hlock, next, distance,
+                                                &trace);
                        if (!ret)
                                return 0;
 
@@ -2731,6 +2813,10 @@ static inline int validate_chain(struct task_struct *curr,
 {
        return 1;
 }
+
+static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+{
+}
 #endif
 
 /*
@@ -2784,6 +2870,12 @@ static void check_chain_key(struct task_struct *curr)
 #endif
 }
 
+static int mark_lock(struct task_struct *curr, struct held_lock *this,
+                    enum lock_usage_bit new_bit);
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+
+
 static void
 print_usage_bug_scenario(struct held_lock *lock)
 {
@@ -2827,7 +2919,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
        print_lock(this);
 
        pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
-       print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
+       print_lock_trace(hlock_class(this)->usage_traces + prev_bit, 1);
 
        print_irqtrace_events(curr);
        pr_warn("\nother info that might help us debug this:\n");
@@ -2853,10 +2945,6 @@ valid_state(struct task_struct *curr, struct held_lock *this,
        return 1;
 }
 
-static int mark_lock(struct task_struct *curr, struct held_lock *this,
-                    enum lock_usage_bit new_bit);
-
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 
 /*
  * print irq inversion bug:
@@ -2936,7 +3024,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 
        root.parent = NULL;
        root.class = hlock_class(this);
-       ret = find_usage_forwards(&root, bit, &target_entry);
+       ret = find_usage_forwards(&root, lock_flag(bit), &target_entry);
        if (ret < 0)
                return print_bfs_bug(ret);
        if (ret == 1)
@@ -2960,7 +3048,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 
        root.parent = NULL;
        root.class = hlock_class(this);
-       ret = find_usage_backwards(&root, bit, &target_entry);
+       ret = find_usage_backwards(&root, lock_flag(bit), &target_entry);
        if (ret < 0)
                return print_bfs_bug(ret);
        if (ret == 1)
@@ -3015,7 +3103,7 @@ static int (*state_verbose_f[])(struct lock_class *class) = {
 static inline int state_verbose(enum lock_usage_bit bit,
                                struct lock_class *class)
 {
-       return state_verbose_f[bit >> 2](class);
+       return state_verbose_f[bit >> LOCK_USAGE_DIR_MASK](class);
 }
 
 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
@@ -3157,7 +3245,7 @@ void lockdep_hardirqs_on(unsigned long ip)
        /*
         * See the fine text that goes along with this variable definition.
         */
-       if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
+       if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
                return;
 
        /*
@@ -4689,8 +4777,8 @@ static void free_zapped_rcu(struct rcu_head *ch)
                return;
 
        raw_local_irq_save(flags);
-       if (!graph_lock())
-               goto out_irq;
+       arch_spin_lock(&lockdep_lock);
+       current->lockdep_recursion = 1;
 
        /* closed head */
        pf = delayed_free.pf + (delayed_free.index ^ 1);
@@ -4702,8 +4790,8 @@ static void free_zapped_rcu(struct rcu_head *ch)
         */
        call_rcu_zapped(delayed_free.pf + delayed_free.index);
 
-       graph_unlock();
-out_irq:
+       current->lockdep_recursion = 0;
+       arch_spin_unlock(&lockdep_lock);
        raw_local_irq_restore(flags);
 }
 
@@ -4744,21 +4832,17 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 {
        struct pending_free *pf;
        unsigned long flags;
-       int locked;
 
        init_data_structures_once();
 
        raw_local_irq_save(flags);
-       locked = graph_lock();
-       if (!locked)
-               goto out_irq;
-
+       arch_spin_lock(&lockdep_lock);
+       current->lockdep_recursion = 1;
        pf = get_pending_free();
        __lockdep_free_key_range(pf, start, size);
        call_rcu_zapped(pf);
-
-       graph_unlock();
-out_irq:
+       current->lockdep_recursion = 0;
+       arch_spin_unlock(&lockdep_lock);
        raw_local_irq_restore(flags);
 
        /*
index d4c197425f68a95919bd239beb19cf6d74d89d02..150ec3f0c5b5dae8d49517bee8b46cbf55004039 100644 (file)
@@ -42,13 +42,35 @@ enum {
        __LOCKF(USED)
 };
 
-#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
-#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
+#define LOCKDEP_STATE(__STATE) LOCKF_ENABLED_##__STATE |
+static const unsigned long LOCKF_ENABLED_IRQ =
+#include "lockdep_states.h"
+       0;
+#undef LOCKDEP_STATE
+
+#define LOCKDEP_STATE(__STATE) LOCKF_USED_IN_##__STATE |
+static const unsigned long LOCKF_USED_IN_IRQ =
+#include "lockdep_states.h"
+       0;
+#undef LOCKDEP_STATE
+
+#define LOCKDEP_STATE(__STATE) LOCKF_ENABLED_##__STATE##_READ |
+static const unsigned long LOCKF_ENABLED_IRQ_READ =
+#include "lockdep_states.h"
+       0;
+#undef LOCKDEP_STATE
+
+#define LOCKDEP_STATE(__STATE) LOCKF_USED_IN_##__STATE##_READ |
+static const unsigned long LOCKF_USED_IN_IRQ_READ =
+#include "lockdep_states.h"
+       0;
+#undef LOCKDEP_STATE
+
+#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
+#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)
 
-#define LOCKF_ENABLED_IRQ_READ \
-               (LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
-#define LOCKF_USED_IN_IRQ_READ \
-               (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
+#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)
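
The repeated #include of lockdep_states.h is the same x-macro trick as in lock_events: with LOCKDEP_STATE() defined to emit 'LOCKF_ENABLED_<state> |', the include expands into one OR-chain per mask, terminated by the literal 0. For the two states visible in the removed lines (HARDIRQ and SOFTIRQ), the first definition expands to:

    static const unsigned long LOCKF_ENABLED_IRQ =
            LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ | 0;
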
 
 /*
  * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
index ad40a2617063c4903afb5ba4942ce16ff1a8ce93..80a463d31a8d95a89ab2fb5cdc2b99686cb872d0 100644 (file)
@@ -829,7 +829,9 @@ static void lock_torture_cleanup(void)
                                                "End of test: SUCCESS");
 
        kfree(cxt.lwsa);
+       cxt.lwsa = NULL;
        kfree(cxt.lrsa);
+       cxt.lrsa = NULL;
 
 end:
        torture_cleanup_end();
index 883cf1b92d9084f30a21f699211d6cd2ca3b9362..f17dad99eec8b76ca3e1b83963308ac6a4c10a7d 100644 (file)
@@ -7,6 +7,8 @@
 #include <linux/sched.h>
 #include <linux/errno.h>
 
+#include "rwsem.h"
+
 int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
                        const char *name, struct lock_class_key *rwsem_key)
 {
index 5e9247dc2515833e78e5f0899795b1228542da2f..e14b32c69639e7dbf6cabbf62387af8d36222764 100644 (file)
@@ -395,7 +395,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
         * 0,1,0 -> 0,0,1
         */
        clear_pending_set_locked(lock);
-       qstat_inc(qstat_lock_pending, true);
+       lockevent_inc(lock_pending);
        return;
 
        /*
@@ -403,7 +403,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
         * queuing.
         */
 queue:
-       qstat_inc(qstat_lock_slowpath, true);
+       lockevent_inc(lock_slowpath);
 pv_queue:
        node = this_cpu_ptr(&qnodes[0].mcs);
        idx = node->count++;
@@ -419,7 +419,7 @@ pv_queue:
         * simple enough.
         */
        if (unlikely(idx >= MAX_NODES)) {
-               qstat_inc(qstat_lock_no_node, true);
+               lockevent_inc(lock_no_node);
                while (!queued_spin_trylock(lock))
                        cpu_relax();
                goto release;
@@ -430,7 +430,7 @@ pv_queue:
        /*
         * Keep counts of non-zero index values:
         */
-       qstat_inc(qstat_lock_use_node2 + idx - 1, idx);
+       lockevent_cond_inc(lock_use_node2 + idx - 1, idx);
 
        /*
         * Ensure that we increment the head node->count before initialising
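The renamed calls split the old qstat_inc(ev, cond) into two explicit forms: lockevent_inc(ev) for unconditional counts and lockevent_cond_inc(ev, cond) where the increment is guarded. Both come from the new lock_events.h and boil down to per-CPU counter bumps. A plausible shape of the wrappers, assuming the per-CPU lockevents[] array that header declares:

    DECLARE_PER_CPU(unsigned long, lockevents[lockevent_num]);

    #define lockevent_inc(ev)                                            \
            this_cpu_inc(lockevents[LOCKEVENT_ ## ev])

    #define lockevent_cond_inc(ev, c)                                    \
            do {                                                         \
                    if (c)                                               \
                            this_cpu_inc(lockevents[LOCKEVENT_ ## ev]);  \
            } while (0)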
index 8f36c27c17948c8e34b8488af540b60c35e77a22..89bab079e7a4d9f939d6bd923d1ac62b0d70d72b 100644 (file)
@@ -89,7 +89,7 @@ static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
 
                if (!(val & _Q_LOCKED_PENDING_MASK) &&
                   (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
-                       qstat_inc(qstat_pv_lock_stealing, true);
+                       lockevent_inc(pv_lock_stealing);
                        return true;
                }
                if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
@@ -219,7 +219,7 @@ static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
                hopcnt++;
                if (!cmpxchg(&he->lock, NULL, lock)) {
                        WRITE_ONCE(he->node, node);
-                       qstat_hop(hopcnt);
+                       lockevent_pv_hop(hopcnt);
                        return &he->lock;
                }
        }
@@ -320,8 +320,8 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
                smp_store_mb(pn->state, vcpu_halted);
 
                if (!READ_ONCE(node->locked)) {
-                       qstat_inc(qstat_pv_wait_node, true);
-                       qstat_inc(qstat_pv_wait_early, wait_early);
+                       lockevent_inc(pv_wait_node);
+                       lockevent_cond_inc(pv_wait_early, wait_early);
                        pv_wait(&pn->state, vcpu_halted);
                }
 
@@ -339,7 +339,8 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
                 * So it is better to spin for a while in the hope that the
                 * MCS lock will be released soon.
                 */
-               qstat_inc(qstat_pv_spurious_wakeup, !READ_ONCE(node->locked));
+               lockevent_cond_inc(pv_spurious_wakeup,
+                                 !READ_ONCE(node->locked));
        }
 
        /*
@@ -416,7 +417,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
        /*
         * Tracking # of slowpath locking operations
         */
-       qstat_inc(qstat_lock_slowpath, true);
+       lockevent_inc(lock_slowpath);
 
        for (;; waitcnt++) {
                /*
@@ -464,8 +465,8 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
                        }
                }
                WRITE_ONCE(pn->state, vcpu_hashed);
-               qstat_inc(qstat_pv_wait_head, true);
-               qstat_inc(qstat_pv_wait_again, waitcnt);
+               lockevent_inc(pv_wait_head);
+               lockevent_cond_inc(pv_wait_again, waitcnt);
                pv_wait(&lock->locked, _Q_SLOW_VAL);
 
                /*
@@ -528,7 +529,7 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
         * vCPU is harmless other than the additional latency in completing
         * the unlock.
         */
-       qstat_inc(qstat_pv_kick_unlock, true);
+       lockevent_inc(pv_kick_unlock);
        pv_kick(node->cpu);
 }
 
index d73f85388d5c17594190009f213a37bbd33c13c2..54152670ff2489fd7997e6505f26da957c35b492 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * Authors: Waiman Long <waiman.long@hpe.com>
+ * Authors: Waiman Long <longman@redhat.com>
  */
 
-/*
- * When queued spinlock statistical counters are enabled, the following
- * debugfs files will be created for reporting the counter values:
- *
- * <debugfs>/qlockstat/
- *   pv_hash_hops      - average # of hops per hashing operation
- *   pv_kick_unlock    - # of vCPU kicks issued at unlock time
- *   pv_kick_wake      - # of vCPU kicks used for computing pv_latency_wake
- *   pv_latency_kick   - average latency (ns) of vCPU kick operation
- *   pv_latency_wake   - average latency (ns) from vCPU kick to wakeup
- *   pv_lock_stealing  - # of lock stealing operations
- *   pv_spurious_wakeup        - # of spurious wakeups in non-head vCPUs
- *   pv_wait_again     - # of wait's after a queue head vCPU kick
- *   pv_wait_early     - # of early vCPU wait's
- *   pv_wait_head      - # of vCPU wait's at the queue head
- *   pv_wait_node      - # of vCPU wait's at a non-head queue node
- *   lock_pending      - # of locking operations via pending code
- *   lock_slowpath     - # of locking operations via MCS lock queue
- *   lock_use_node2    - # of locking operations that use 2nd per-CPU node
- *   lock_use_node3    - # of locking operations that use 3rd per-CPU node
- *   lock_use_node4    - # of locking operations that use 4th per-CPU node
- *   lock_no_node      - # of locking operations without using per-CPU node
- *
- * Subtracting lock_use_node[234] from lock_slowpath will give you
- * lock_use_node1.
- *
- * Writing to the "reset_counters" file will reset all the above counter
- * values.
- *
- * These statistical counters are implemented as per-cpu variables which are
- * summed and computed whenever the corresponding debugfs files are read. This
- * minimizes added overhead making the counters usable even in a production
- * environment.
- *
- * There may be a slight difference between pv_kick_wake and pv_kick_unlock.
- */
-enum qlock_stats {
-       qstat_pv_hash_hops,
-       qstat_pv_kick_unlock,
-       qstat_pv_kick_wake,
-       qstat_pv_latency_kick,
-       qstat_pv_latency_wake,
-       qstat_pv_lock_stealing,
-       qstat_pv_spurious_wakeup,
-       qstat_pv_wait_again,
-       qstat_pv_wait_early,
-       qstat_pv_wait_head,
-       qstat_pv_wait_node,
-       qstat_lock_pending,
-       qstat_lock_slowpath,
-       qstat_lock_use_node2,
-       qstat_lock_use_node3,
-       qstat_lock_use_node4,
-       qstat_lock_no_node,
-       qstat_num,      /* Total number of statistical counters */
-       qstat_reset_cnts = qstat_num,
-};
+#include "lock_events.h"
 
-#ifdef CONFIG_QUEUED_LOCK_STAT
+#ifdef CONFIG_LOCK_EVENT_COUNTS
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
 /*
- * Collect pvqspinlock statistics
+ * Collect pvqspinlock locking event counts
  */
-#include <linux/debugfs.h>
 #include <linux/sched.h>
 #include <linux/sched/clock.h>
 #include <linux/fs.h>
 
-static const char * const qstat_names[qstat_num + 1] = {
-       [qstat_pv_hash_hops]       = "pv_hash_hops",
-       [qstat_pv_kick_unlock]     = "pv_kick_unlock",
-       [qstat_pv_kick_wake]       = "pv_kick_wake",
-       [qstat_pv_spurious_wakeup] = "pv_spurious_wakeup",
-       [qstat_pv_latency_kick]    = "pv_latency_kick",
-       [qstat_pv_latency_wake]    = "pv_latency_wake",
-       [qstat_pv_lock_stealing]   = "pv_lock_stealing",
-       [qstat_pv_wait_again]      = "pv_wait_again",
-       [qstat_pv_wait_early]      = "pv_wait_early",
-       [qstat_pv_wait_head]       = "pv_wait_head",
-       [qstat_pv_wait_node]       = "pv_wait_node",
-       [qstat_lock_pending]       = "lock_pending",
-       [qstat_lock_slowpath]      = "lock_slowpath",
-       [qstat_lock_use_node2]     = "lock_use_node2",
-       [qstat_lock_use_node3]     = "lock_use_node3",
-       [qstat_lock_use_node4]     = "lock_use_node4",
-       [qstat_lock_no_node]       = "lock_no_node",
-       [qstat_reset_cnts]         = "reset_counters",
-};
+#define EVENT_COUNT(ev)        lockevents[LOCKEVENT_ ## ev]
 
 /*
- * Per-cpu counters
+ * PV specific per-cpu counter
  */
-static DEFINE_PER_CPU(unsigned long, qstats[qstat_num]);
 static DEFINE_PER_CPU(u64, pv_kick_time);
 
 /*
- * Function to read and return the qlock statistical counter values
+ * Function to read and return the PV qspinlock counts.
  *
  * The following counters are handled specially:
- * 1. qstat_pv_latency_kick
+ * 1. pv_latency_kick
  *    Average kick latency (ns) = pv_latency_kick/pv_kick_unlock
- * 2. qstat_pv_latency_wake
+ * 2. pv_latency_wake
  *    Average wake latency (ns) = pv_latency_wake/pv_kick_wake
- * 3. qstat_pv_hash_hops
+ * 3. pv_hash_hops
  *    Average hops/hash = pv_hash_hops/pv_kick_unlock
  */
-static ssize_t qstat_read(struct file *file, char __user *user_buf,
-                         size_t count, loff_t *ppos)
+ssize_t lockevent_read(struct file *file, char __user *user_buf,
+                      size_t count, loff_t *ppos)
 {
        char buf[64];
-       int cpu, counter, len;
-       u64 stat = 0, kicks = 0;
+       int cpu, id, len;
+       u64 sum = 0, kicks = 0;
 
        /*
         * Get the counter ID stored in file->f_inode->i_private
         */
-       counter = (long)file_inode(file)->i_private;
+       id = (long)file_inode(file)->i_private;
 
-       if (counter >= qstat_num)
+       if (id >= lockevent_num)
                return -EBADF;
 
        for_each_possible_cpu(cpu) {
-               stat += per_cpu(qstats[counter], cpu);
+               sum += per_cpu(lockevents[id], cpu);
                /*
-                * Need to sum additional counter for some of them
+                * Need to sum additional counters for some of them
                 */
-               switch (counter) {
+               switch (id) {
 
-               case qstat_pv_latency_kick:
-               case qstat_pv_hash_hops:
-                       kicks += per_cpu(qstats[qstat_pv_kick_unlock], cpu);
+               case LOCKEVENT_pv_latency_kick:
+               case LOCKEVENT_pv_hash_hops:
+                       kicks += per_cpu(EVENT_COUNT(pv_kick_unlock), cpu);
                        break;
 
-               case qstat_pv_latency_wake:
-                       kicks += per_cpu(qstats[qstat_pv_kick_wake], cpu);
+               case LOCKEVENT_pv_latency_wake:
+                       kicks += per_cpu(EVENT_COUNT(pv_kick_wake), cpu);
                        break;
                }
        }
 
-       if (counter == qstat_pv_hash_hops) {
+       if (id == LOCKEVENT_pv_hash_hops) {
                u64 frac = 0;
 
                if (kicks) {
-                       frac = 100ULL * do_div(stat, kicks);
+                       frac = 100ULL * do_div(sum, kicks);
                        frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
                }
 
                /*
                 * Return a X.XX decimal number
                 */
-               len = snprintf(buf, sizeof(buf) - 1, "%llu.%02llu\n", stat, frac);
+               len = snprintf(buf, sizeof(buf) - 1, "%llu.%02llu\n",
+                              sum, frac);
        } else {
                /*
                 * Round to the nearest ns
                 */
-               if ((counter == qstat_pv_latency_kick) ||
-                   (counter == qstat_pv_latency_wake)) {
+               if ((id == LOCKEVENT_pv_latency_kick) ||
+                   (id == LOCKEVENT_pv_latency_wake)) {
                        if (kicks)
-                               stat = DIV_ROUND_CLOSEST_ULL(stat, kicks);
+                               sum = DIV_ROUND_CLOSEST_ULL(sum, kicks);
                }
-               len = snprintf(buf, sizeof(buf) - 1, "%llu\n", stat);
+               len = snprintf(buf, sizeof(buf) - 1, "%llu\n", sum);
        }
 
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
-/*
- * Function to handle write request
- *
- * When counter = reset_cnts, reset all the counter values.
- * Since the counter updates aren't atomic, the resetting is done twice
- * to make sure that the counters are very likely to be all cleared.
- */
-static ssize_t qstat_write(struct file *file, const char __user *user_buf,
-                          size_t count, loff_t *ppos)
-{
-       int cpu;
-
-       /*
-        * Get the counter ID stored in file->f_inode->i_private
-        */
-       if ((long)file_inode(file)->i_private != qstat_reset_cnts)
-               return count;
-
-       for_each_possible_cpu(cpu) {
-               int i;
-               unsigned long *ptr = per_cpu_ptr(qstats, cpu);
-
-               for (i = 0 ; i < qstat_num; i++)
-                       WRITE_ONCE(ptr[i], 0);
-       }
-       return count;
-}
-
-/*
- * Debugfs data structures
- */
-static const struct file_operations fops_qstat = {
-       .read = qstat_read,
-       .write = qstat_write,
-       .llseek = default_llseek,
-};
-
-/*
- * Initialize debugfs for the qspinlock statistical counters
- */
-static int __init init_qspinlock_stat(void)
-{
-       struct dentry *d_qstat = debugfs_create_dir("qlockstat", NULL);
-       int i;
-
-       if (!d_qstat)
-               goto out;
-
-       /*
-        * Create the debugfs files
-        *
-        * As reading from and writing to the stat files can be slow, only
-        * root is allowed to do the read/write to limit impact to system
-        * performance.
-        */
-       for (i = 0; i < qstat_num; i++)
-               if (!debugfs_create_file(qstat_names[i], 0400, d_qstat,
-                                        (void *)(long)i, &fops_qstat))
-                       goto fail_undo;
-
-       if (!debugfs_create_file(qstat_names[qstat_reset_cnts], 0200, d_qstat,
-                                (void *)(long)qstat_reset_cnts, &fops_qstat))
-               goto fail_undo;
-
-       return 0;
-fail_undo:
-       debugfs_remove_recursive(d_qstat);
-out:
-       pr_warn("Could not create 'qlockstat' debugfs entries\n");
-       return -ENOMEM;
-}
-fs_initcall(init_qspinlock_stat);
-
-/*
- * Increment the PV qspinlock statistical counters
- */
-static inline void qstat_inc(enum qlock_stats stat, bool cond)
-{
-       if (cond)
-               this_cpu_inc(qstats[stat]);
-}
-
 /*
  * PV hash hop count
  */
-static inline void qstat_hop(int hopcnt)
+static inline void lockevent_pv_hop(int hopcnt)
 {
-       this_cpu_add(qstats[qstat_pv_hash_hops], hopcnt);
+       this_cpu_add(EVENT_COUNT(pv_hash_hops), hopcnt);
 }
 
 /*
@@ -276,7 +119,7 @@ static inline void __pv_kick(int cpu)
 
        per_cpu(pv_kick_time, cpu) = start;
        pv_kick(cpu);
-       this_cpu_add(qstats[qstat_pv_latency_kick], sched_clock() - start);
+       this_cpu_add(EVENT_COUNT(pv_latency_kick), sched_clock() - start);
 }
 
 /*
@@ -289,18 +132,19 @@ static inline void __pv_wait(u8 *ptr, u8 val)
        *pkick_time = 0;
        pv_wait(ptr, val);
        if (*pkick_time) {
-               this_cpu_add(qstats[qstat_pv_latency_wake],
+               this_cpu_add(EVENT_COUNT(pv_latency_wake),
                             sched_clock() - *pkick_time);
-               qstat_inc(qstat_pv_kick_wake, true);
+               lockevent_inc(pv_kick_wake);
        }
 }
 
 #define pv_kick(c)     __pv_kick(c)
 #define pv_wait(p, v)  __pv_wait(p, v)
 
-#else /* CONFIG_QUEUED_LOCK_STAT */
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+#else /* CONFIG_LOCK_EVENT_COUNTS */
 
-static inline void qstat_inc(enum qlock_stats stat, bool cond) { }
-static inline void qstat_hop(int hopcnt)                       { }
+static inline void lockevent_pv_hop(int hopcnt)        { }
 
-#endif /* CONFIG_QUEUED_LOCK_STAT */
+#endif /* CONFIG_LOCK_EVENT_COUNTS */
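lockevent_read() above prints the pv_hash_hops average as an X.XX decimal without floating point: do_div(sum, kicks) divides sum in place and hands back the remainder, and scaling that remainder by 100 with round-to-nearest yields the two fractional digits. The same arithmetic as a stand-alone program, with plain / and % standing in for do_div() and DIV_ROUND_CLOSEST_ULL():

    #include <stdio.h>

    static void print_avg(unsigned long long sum, unsigned long long kicks)
    {
            unsigned long long frac = 0;

            if (kicks) {
                    frac = 100ULL * (sum % kicks);     /* remainder -> hundredths */
                    frac = (frac + kicks / 2) / kicks; /* round to nearest */
                    sum /= kicks;
            }
            printf("%llu.%02llu\n", sum, frac);
    }

    int main(void)
    {
            print_avg(7, 3);        /* prints "2.33" */
            return 0;
    }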
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
deleted file mode 100644 (file)
index a7ffb2a..0000000
+++ /dev/null
@@ -1,339 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
- * generic spinlock implementation
- *
- * Copyright (c) 2001   David Howells (dhowells@redhat.com).
- * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
- * - Derived also from comments by Linus
- */
-#include <linux/rwsem.h>
-#include <linux/sched/signal.h>
-#include <linux/sched/debug.h>
-#include <linux/export.h>
-
-enum rwsem_waiter_type {
-       RWSEM_WAITING_FOR_WRITE,
-       RWSEM_WAITING_FOR_READ
-};
-
-struct rwsem_waiter {
-       struct list_head list;
-       struct task_struct *task;
-       enum rwsem_waiter_type type;
-};
-
-int rwsem_is_locked(struct rw_semaphore *sem)
-{
-       int ret = 1;
-       unsigned long flags;
-
-       if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
-               ret = (sem->count != 0);
-               raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-       }
-       return ret;
-}
-EXPORT_SYMBOL(rwsem_is_locked);
-
-/*
- * initialise the semaphore
- */
-void __init_rwsem(struct rw_semaphore *sem, const char *name,
-                 struct lock_class_key *key)
-{
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-       /*
-        * Make sure we are not reinitializing a held semaphore:
-        */
-       debug_check_no_locks_freed((void *)sem, sizeof(*sem));
-       lockdep_init_map(&sem->dep_map, name, key, 0);
-#endif
-       sem->count = 0;
-       raw_spin_lock_init(&sem->wait_lock);
-       INIT_LIST_HEAD(&sem->wait_list);
-}
-EXPORT_SYMBOL(__init_rwsem);
-
-/*
- * handle the lock release when processes blocked on it can now run
- * - if we come here, then:
- *   - the 'active count' _reached_ zero
- *   - the 'waiting count' is non-zero
- * - the spinlock must be held by the caller
- * - woken process blocks are discarded from the list after having their task zeroed
- * - writers are only woken if wakewrite is non-zero
- */
-static inline struct rw_semaphore *
-__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
-{
-       struct rwsem_waiter *waiter;
-       struct task_struct *tsk;
-       int woken;
-
-       waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-
-       if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
-               if (wakewrite)
-                       /* Wake up a writer. Note that we do not grant it the
-                        * lock - it will have to acquire it when it runs. */
-                       wake_up_process(waiter->task);
-               goto out;
-       }
-
-       /* grant an infinite number of read locks to the front of the queue */
-       woken = 0;
-       do {
-               struct list_head *next = waiter->list.next;
-
-               list_del(&waiter->list);
-               tsk = waiter->task;
-               /*
-                * Make sure we do not wake up the next reader before
-                * setting the nil condition to grant the next reader;
-                * otherwise we could miss the wakeup on the other
-                * side and end up sleeping again. See the pairing
-                * in rwsem_down_read_failed().
-                */
-               smp_mb();
-               waiter->task = NULL;
-               wake_up_process(tsk);
-               put_task_struct(tsk);
-               woken++;
-               if (next == &sem->wait_list)
-                       break;
-               waiter = list_entry(next, struct rwsem_waiter, list);
-       } while (waiter->type != RWSEM_WAITING_FOR_WRITE);
-
-       sem->count += woken;
-
- out:
-       return sem;
-}
-
-/*
- * wake a single writer
- */
-static inline struct rw_semaphore *
-__rwsem_wake_one_writer(struct rw_semaphore *sem)
-{
-       struct rwsem_waiter *waiter;
-
-       waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-       wake_up_process(waiter->task);
-
-       return sem;
-}
-
-/*
- * get a read lock on the semaphore
- */
-int __sched __down_read_common(struct rw_semaphore *sem, int state)
-{
-       struct rwsem_waiter waiter;
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
-       if (sem->count >= 0 && list_empty(&sem->wait_list)) {
-               /* granted */
-               sem->count++;
-               raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-               goto out;
-       }
-
-       /* set up my own style of waitqueue */
-       waiter.task = current;
-       waiter.type = RWSEM_WAITING_FOR_READ;
-       get_task_struct(current);
-
-       list_add_tail(&waiter.list, &sem->wait_list);
-
-       /* wait to be given the lock */
-       for (;;) {
-               if (!waiter.task)
-                       break;
-               if (signal_pending_state(state, current))
-                       goto out_nolock;
-               set_current_state(state);
-               raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-               schedule();
-               raw_spin_lock_irqsave(&sem->wait_lock, flags);
-       }
-
-       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- out:
-       return 0;
-
-out_nolock:
-       /*
-        * We didn't take the lock, so there is a writer, which is
-        * either the owner or the first waiter of the sem. If it is
-        * a waiter, it will be woken by the current owner. No need
-        * to wake anybody.
-        */
-       list_del(&waiter.list);
-       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-       return -EINTR;
-}
-
-void __sched __down_read(struct rw_semaphore *sem)
-{
-       __down_read_common(sem, TASK_UNINTERRUPTIBLE);
-}
-
-int __sched __down_read_killable(struct rw_semaphore *sem)
-{
-       return __down_read_common(sem, TASK_KILLABLE);
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-int __down_read_trylock(struct rw_semaphore *sem)
-{
-       unsigned long flags;
-       int ret = 0;
-
-       raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
-       if (sem->count >= 0 && list_empty(&sem->wait_list)) {
-               /* granted */
-               sem->count++;
-               ret = 1;
-       }
-
-       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-       return ret;
-}
-
-/*
- * get a write lock on the semaphore
- */
-int __sched __down_write_common(struct rw_semaphore *sem, int state)
-{
-       struct rwsem_waiter waiter;
-       unsigned long flags;
-       int ret = 0;
-
-       raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
-       /* set up my own style of waitqueue */
-       waiter.task = current;
-       waiter.type = RWSEM_WAITING_FOR_WRITE;
-       list_add_tail(&waiter.list, &sem->wait_list);
-
-       /* wait for someone to release the lock */
-       for (;;) {
-               /*
-                * This is the key to write lock stealing: it allows the
-                * task already on a CPU to take the lock soon, rather
-                * than go to sleep and wait for the system to wake it
-                * (or someone else at the head of the wait list) up.
-                */
-               if (sem->count == 0)
-                       break;
-               if (signal_pending_state(state, current))
-                       goto out_nolock;
-
-               set_current_state(state);
-               raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-               schedule();
-               raw_spin_lock_irqsave(&sem->wait_lock, flags);
-       }
-       /* got the lock */
-       sem->count = -1;
-       list_del(&waiter.list);
-
-       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-       return ret;
-
-out_nolock:
-       list_del(&waiter.list);
-       if (!list_empty(&sem->wait_list) && sem->count >= 0)
-               __rwsem_do_wake(sem, 0);
-       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-       return -EINTR;
-}
-
-void __sched __down_write(struct rw_semaphore *sem)
-{
-       __down_write_common(sem, TASK_UNINTERRUPTIBLE);
-}
-
-int __sched __down_write_killable(struct rw_semaphore *sem)
-{
-       return __down_write_common(sem, TASK_KILLABLE);
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-int __down_write_trylock(struct rw_semaphore *sem)
-{
-       unsigned long flags;
-       int ret = 0;
-
-       raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
-       if (sem->count == 0) {
-               /* got the lock */
-               sem->count = -1;
-               ret = 1;
-       }
-
-       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-       return ret;
-}
-
-/*
- * release a read lock on the semaphore
- */
-void __up_read(struct rw_semaphore *sem)
-{
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
-       if (--sem->count == 0 && !list_empty(&sem->wait_list))
-               sem = __rwsem_wake_one_writer(sem);
-
-       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-}
-
-/*
- * release a write lock on the semaphore
- */
-void __up_write(struct rw_semaphore *sem)
-{
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
-       sem->count = 0;
-       if (!list_empty(&sem->wait_list))
-               sem = __rwsem_do_wake(sem, 1);
-
-       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-}
-
-/*
- * downgrade a write lock into a read lock
- * - just wake up any readers at the front of the queue
- */
-void __downgrade_write(struct rw_semaphore *sem)
-{
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
-       sem->count = 1;
-       if (!list_empty(&sem->wait_list))
-               sem = __rwsem_do_wake(sem, 0);
-
-       raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-}
-
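The file deleted above was the rwsem fallback for architectures without suitable atomics: the entire lock state is a plain int guarded by wait_lock, where 0 means unlocked, a positive value counts active readers and -1 marks a held write lock. With it gone, the atomic (xadd) implementation below is the only one left. The scheme condensed into a hypothetical tiny_rwsem, for reference:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct tiny_rwsem {
            int count;                      /* 0 free, >0 readers, -1 writer */
            raw_spinlock_t wait_lock;       /* guards count and wait_list */
            struct list_head wait_list;
    };

    static bool tiny_read_trylock(struct tiny_rwsem *sem)
    {
            unsigned long flags;
            bool granted = false;

            raw_spin_lock_irqsave(&sem->wait_lock, flags);
            if (sem->count >= 0 && list_empty(&sem->wait_list)) {
                    sem->count++;           /* one more active reader */
                    granted = true;
            }
            raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
            return granted;
    }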
index fbe96341beeed2c37f84526cb70adbe1b8734156..6b3ee9948bf17a37f5be8a730064f3e5d7374774 100644 (file)
@@ -147,6 +147,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
                         * will notice the queued writer.
                         */
                        wake_q_add(wake_q, waiter->task);
+                       lockevent_inc(rwsem_wake_writer);
                }
 
                return;
@@ -176,9 +177,8 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
                        goto try_reader_grant;
                }
                /*
-                * It is not really necessary to set it to reader-owned here,
-                * but it gives the spinners an early indication that the
-                * readers now have the lock.
+                * Set it to reader-owned to give spinners an early
+                * indication that readers now have the lock.
                 */
                __rwsem_set_reader_owned(sem, waiter->task);
        }
@@ -215,6 +215,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
        }
 
        adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
+       lockevent_cond_inc(rwsem_wake_reader, woken);
        if (list_empty(&sem->wait_list)) {
                /* hit end of list above */
                adjustment -= RWSEM_WAITING_BIAS;
@@ -224,92 +225,6 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
                atomic_long_add(adjustment, &sem->count);
 }
 
-/*
- * Wait for the read lock to be granted
- */
-static inline struct rw_semaphore __sched *
-__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
-{
-       long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
-       struct rwsem_waiter waiter;
-       DEFINE_WAKE_Q(wake_q);
-
-       waiter.task = current;
-       waiter.type = RWSEM_WAITING_FOR_READ;
-
-       raw_spin_lock_irq(&sem->wait_lock);
-       if (list_empty(&sem->wait_list)) {
-               /*
-                * In case the wait queue is empty and the lock isn't owned
-                * by a writer, this reader can exit the slowpath and return
-                * immediately as its RWSEM_ACTIVE_READ_BIAS has already
-                * been set in the count.
-                */
-               if (atomic_long_read(&sem->count) >= 0) {
-                       raw_spin_unlock_irq(&sem->wait_lock);
-                       return sem;
-               }
-               adjustment += RWSEM_WAITING_BIAS;
-       }
-       list_add_tail(&waiter.list, &sem->wait_list);
-
-       /* we're now waiting on the lock, but no longer actively locking */
-       count = atomic_long_add_return(adjustment, &sem->count);
-
-       /*
-        * If there are no active locks, wake the front queued process(es).
-        *
-        * If there are no writers and we are first in the queue,
-        *      wake our own waiter to join the existing active readers!
-        */
-       if (count == RWSEM_WAITING_BIAS ||
-           (count > RWSEM_WAITING_BIAS &&
-            adjustment != -RWSEM_ACTIVE_READ_BIAS))
-               __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
-
-       raw_spin_unlock_irq(&sem->wait_lock);
-       wake_up_q(&wake_q);
-
-       /* wait to be given the lock */
-       while (true) {
-               set_current_state(state);
-               if (!waiter.task)
-                       break;
-               if (signal_pending_state(state, current)) {
-                       raw_spin_lock_irq(&sem->wait_lock);
-                       if (waiter.task)
-                               goto out_nolock;
-                       raw_spin_unlock_irq(&sem->wait_lock);
-                       break;
-               }
-               schedule();
-       }
-
-       __set_current_state(TASK_RUNNING);
-       return sem;
-out_nolock:
-       list_del(&waiter.list);
-       if (list_empty(&sem->wait_list))
-               atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
-       raw_spin_unlock_irq(&sem->wait_lock);
-       __set_current_state(TASK_RUNNING);
-       return ERR_PTR(-EINTR);
-}
-
-__visible struct rw_semaphore * __sched
-rwsem_down_read_failed(struct rw_semaphore *sem)
-{
-       return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(rwsem_down_read_failed);
-
-__visible struct rw_semaphore * __sched
-rwsem_down_read_failed_killable(struct rw_semaphore *sem)
-{
-       return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
-}
-EXPORT_SYMBOL(rwsem_down_read_failed_killable);
-
 /*
  * This function must be called with the sem->wait_lock held to prevent
  * race conditions between checking the rwsem wait list and setting the
@@ -346,21 +261,17 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
  */
 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 {
-       long old, count = atomic_long_read(&sem->count);
-
-       while (true) {
-               if (!(count == 0 || count == RWSEM_WAITING_BIAS))
-                       return false;
+       long count = atomic_long_read(&sem->count);
 
-               old = atomic_long_cmpxchg_acquire(&sem->count, count,
-                                     count + RWSEM_ACTIVE_WRITE_BIAS);
-               if (old == count) {
+       while (!count || count == RWSEM_WAITING_BIAS) {
+               if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
+                                       count + RWSEM_ACTIVE_WRITE_BIAS)) {
                        rwsem_set_owner(sem);
+                       lockevent_inc(rwsem_opt_wlock);
                        return true;
                }
-
-               count = old;
        }
+       return false;
 }
 
 static inline bool owner_on_cpu(struct task_struct *owner)
@@ -481,6 +392,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
        osq_unlock(&sem->osq);
 done:
        preempt_enable();
+       lockevent_cond_inc(rwsem_opt_fail, !taken);
        return taken;
 }
 
@@ -504,6 +416,97 @@ static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
 }
 #endif
 
+/*
+ * Wait for the read lock to be granted
+ */
+static inline struct rw_semaphore __sched *
+__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
+{
+       long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
+       struct rwsem_waiter waiter;
+       DEFINE_WAKE_Q(wake_q);
+
+       waiter.task = current;
+       waiter.type = RWSEM_WAITING_FOR_READ;
+
+       raw_spin_lock_irq(&sem->wait_lock);
+       if (list_empty(&sem->wait_list)) {
+               /*
+                * In case the wait queue is empty and the lock isn't owned
+                * by a writer, this reader can exit the slowpath and return
+                * immediately as its RWSEM_ACTIVE_READ_BIAS has already
+                * been set in the count.
+                */
+               if (atomic_long_read(&sem->count) >= 0) {
+                       raw_spin_unlock_irq(&sem->wait_lock);
+                       rwsem_set_reader_owned(sem);
+                       lockevent_inc(rwsem_rlock_fast);
+                       return sem;
+               }
+               adjustment += RWSEM_WAITING_BIAS;
+       }
+       list_add_tail(&waiter.list, &sem->wait_list);
+
+       /* we're now waiting on the lock, but no longer actively locking */
+       count = atomic_long_add_return(adjustment, &sem->count);
+
+       /*
+        * If there are no active locks, wake the front queued process(es).
+        *
+        * If there are no writers and we are first in the queue,
+        * wake our own waiter to join the existing active readers!
+        */
+       if (count == RWSEM_WAITING_BIAS ||
+           (count > RWSEM_WAITING_BIAS &&
+            adjustment != -RWSEM_ACTIVE_READ_BIAS))
+               __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
+
+       raw_spin_unlock_irq(&sem->wait_lock);
+       wake_up_q(&wake_q);
+
+       /* wait to be given the lock */
+       while (true) {
+               set_current_state(state);
+               if (!waiter.task)
+                       break;
+               if (signal_pending_state(state, current)) {
+                       raw_spin_lock_irq(&sem->wait_lock);
+                       if (waiter.task)
+                               goto out_nolock;
+                       raw_spin_unlock_irq(&sem->wait_lock);
+                       break;
+               }
+               schedule();
+               lockevent_inc(rwsem_sleep_reader);
+       }
+
+       __set_current_state(TASK_RUNNING);
+       lockevent_inc(rwsem_rlock);
+       return sem;
+out_nolock:
+       list_del(&waiter.list);
+       if (list_empty(&sem->wait_list))
+               atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
+       raw_spin_unlock_irq(&sem->wait_lock);
+       __set_current_state(TASK_RUNNING);
+       lockevent_inc(rwsem_rlock_fail);
+       return ERR_PTR(-EINTR);
+}
+
+__visible struct rw_semaphore * __sched
+rwsem_down_read_failed(struct rw_semaphore *sem)
+{
+       return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(rwsem_down_read_failed);
+
+__visible struct rw_semaphore * __sched
+rwsem_down_read_failed_killable(struct rw_semaphore *sem)
+{
+       return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
+}
+EXPORT_SYMBOL(rwsem_down_read_failed_killable);
+
 /*
  * Wait until we successfully acquire the write lock
  */
@@ -580,6 +583,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
                                goto out_nolock;
 
                        schedule();
+                       lockevent_inc(rwsem_sleep_writer);
                        set_current_state(state);
                } while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);
 
@@ -588,6 +592,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
        __set_current_state(TASK_RUNNING);
        list_del(&waiter.list);
        raw_spin_unlock_irq(&sem->wait_lock);
+       lockevent_inc(rwsem_wlock);
 
        return ret;
 
@@ -601,6 +606,7 @@ out_nolock:
                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
        raw_spin_unlock_irq(&sem->wait_lock);
        wake_up_q(&wake_q);
+       lockevent_inc(rwsem_wlock_fail);
 
        return ERR_PTR(-EINTR);
 }
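rwsem_try_write_lock_unqueued() above also switches to atomic_long_try_cmpxchg_acquire(), which on failure writes the freshly observed value back into its expected-value argument; the manual old-versus-count comparison and re-read disappear, and the loop condition simply re-tests the updated value. The idiom in portable C11 terms:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Add "bias" only while the value is one of two acceptable states. */
    static bool add_bias_if(atomic_long *v, long ok1, long ok2, long bias)
    {
            long expected = atomic_load(v);

            while (expected == ok1 || expected == ok2) {
                    if (atomic_compare_exchange_weak(v, &expected,
                                                     expected + bias))
                            return true;    /* won the race */
                    /* failure refreshed "expected"; the loop re-checks it */
            }
            return false;
    }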
index e586f0d03ad38f51539be76c4f042642cdd89200..ccbf18f560ff1d85f0835ff5cadc8044cae00bfc 100644 (file)
@@ -24,7 +24,6 @@ void __sched down_read(struct rw_semaphore *sem)
        rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
 
        LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
-       rwsem_set_reader_owned(sem);
 }
 
 EXPORT_SYMBOL(down_read);
@@ -39,7 +38,6 @@ int __sched down_read_killable(struct rw_semaphore *sem)
                return -EINTR;
        }
 
-       rwsem_set_reader_owned(sem);
        return 0;
 }
 
@@ -52,10 +50,8 @@ int down_read_trylock(struct rw_semaphore *sem)
 {
        int ret = __down_read_trylock(sem);
 
-       if (ret == 1) {
+       if (ret == 1)
                rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
-               rwsem_set_reader_owned(sem);
-       }
        return ret;
 }
 
@@ -70,7 +66,6 @@ void __sched down_write(struct rw_semaphore *sem)
        rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
 
        LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
-       rwsem_set_owner(sem);
 }
 
 EXPORT_SYMBOL(down_write);
@@ -88,7 +83,6 @@ int __sched down_write_killable(struct rw_semaphore *sem)
                return -EINTR;
        }
 
-       rwsem_set_owner(sem);
        return 0;
 }
 
@@ -101,10 +95,8 @@ int down_write_trylock(struct rw_semaphore *sem)
 {
        int ret = __down_write_trylock(sem);
 
-       if (ret == 1) {
+       if (ret == 1)
                rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
-               rwsem_set_owner(sem);
-       }
 
        return ret;
 }
@@ -117,9 +109,7 @@ EXPORT_SYMBOL(down_write_trylock);
 void up_read(struct rw_semaphore *sem)
 {
        rwsem_release(&sem->dep_map, 1, _RET_IP_);
-       DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED));
 
-       rwsem_clear_reader_owned(sem);
        __up_read(sem);
 }
 
@@ -131,9 +121,7 @@ EXPORT_SYMBOL(up_read);
 void up_write(struct rw_semaphore *sem)
 {
        rwsem_release(&sem->dep_map, 1, _RET_IP_);
-       DEBUG_RWSEMS_WARN_ON(sem->owner != current);
 
-       rwsem_clear_owner(sem);
        __up_write(sem);
 }
 
@@ -145,9 +133,7 @@ EXPORT_SYMBOL(up_write);
 void downgrade_write(struct rw_semaphore *sem)
 {
        lock_downgrade(&sem->dep_map, _RET_IP_);
-       DEBUG_RWSEMS_WARN_ON(sem->owner != current);
 
-       rwsem_set_reader_owned(sem);
        __downgrade_write(sem);
 }
 
@@ -161,7 +147,6 @@ void down_read_nested(struct rw_semaphore *sem, int subclass)
        rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
 
        LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
-       rwsem_set_reader_owned(sem);
 }
 
 EXPORT_SYMBOL(down_read_nested);
@@ -172,7 +157,6 @@ void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
        rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
 
        LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
-       rwsem_set_owner(sem);
 }
 
 EXPORT_SYMBOL(_down_write_nest_lock);
@@ -193,7 +177,6 @@ void down_write_nested(struct rw_semaphore *sem, int subclass)
        rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
 
        LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
-       rwsem_set_owner(sem);
 }
 
 EXPORT_SYMBOL(down_write_nested);
@@ -208,7 +191,6 @@ int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
                return -EINTR;
        }
 
-       rwsem_set_owner(sem);
        return 0;
 }
 
@@ -216,7 +198,8 @@ EXPORT_SYMBOL(down_write_killable_nested);
 
 void up_read_non_owner(struct rw_semaphore *sem)
 {
-       DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED));
+       DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED),
+                               sem);
        __up_read(sem);
 }
 
index bad2bca0268b13f295c44201d15b3d34e25c1364..64877f5294e35b194232b1dca59e0db84d48d32c 100644 (file)
  * is involved. Ideally we would like to track all the readers that own
  * a rwsem, but the overhead is simply too big.
  */
+#include "lock_events.h"
+
 #define RWSEM_READER_OWNED     (1UL << 0)
 #define RWSEM_ANONYMOUSLY_OWNED        (1UL << 1)
 
 #ifdef CONFIG_DEBUG_RWSEMS
-# define DEBUG_RWSEMS_WARN_ON(c)       DEBUG_LOCKS_WARN_ON(c)
+# define DEBUG_RWSEMS_WARN_ON(c, sem)  do {                    \
+       if (!debug_locks_silent &&                              \
+           WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
+               #c, atomic_long_read(&(sem)->count),            \
+               (long)((sem)->owner), (long)current,            \
+               list_empty(&(sem)->wait_list) ? "" : "not "))   \
+                       debug_locks_off();                      \
+       } while (0)
+#else
+# define DEBUG_RWSEMS_WARN_ON(c, sem)
+#endif
+
+/*
+ * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
+ * Adapted largely from include/asm-i386/rwsem.h
+ * by Paul Mackerras <paulus@samba.org>.
+ */
+
+/*
+ * the semaphore definition
+ */
+#ifdef CONFIG_64BIT
+# define RWSEM_ACTIVE_MASK             0xffffffffL
 #else
-# define DEBUG_RWSEMS_WARN_ON(c)
+# define RWSEM_ACTIVE_MASK             0x0000ffffL
 #endif
 
+#define RWSEM_ACTIVE_BIAS              0x00000001L
+#define RWSEM_WAITING_BIAS             (-RWSEM_ACTIVE_MASK-1)
+#define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 /*
  * All writes to owner are protected by WRITE_ONCE() to make sure that
@@ -132,3 +161,144 @@ static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
 {
 }
 #endif
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
+/*
+ * lock for reading
+ */
+static inline void __down_read(struct rw_semaphore *sem)
+{
+       if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
+               rwsem_down_read_failed(sem);
+               DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
+                                       RWSEM_READER_OWNED), sem);
+       } else {
+               rwsem_set_reader_owned(sem);
+       }
+}
+
+static inline int __down_read_killable(struct rw_semaphore *sem)
+{
+       if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
+               if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+                       return -EINTR;
+               DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
+                                       RWSEM_READER_OWNED), sem);
+       } else {
+               rwsem_set_reader_owned(sem);
+       }
+       return 0;
+}
+
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+       /*
+        * Optimize for the case when the rwsem is not locked at all.
+        */
+       long tmp = RWSEM_UNLOCKED_VALUE;
+
+       lockevent_inc(rwsem_rtrylock);
+       do {
+               if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
+                                       tmp + RWSEM_ACTIVE_READ_BIAS)) {
+                       rwsem_set_reader_owned(sem);
+                       return 1;
+               }
+       } while (tmp >= 0);
+       return 0;
+}
+
+/*
+ * lock for writing
+ */
+static inline void __down_write(struct rw_semaphore *sem)
+{
+       long tmp;
+
+       tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
+                                            &sem->count);
+       if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+               rwsem_down_write_failed(sem);
+       rwsem_set_owner(sem);
+}
+
+static inline int __down_write_killable(struct rw_semaphore *sem)
+{
+       long tmp;
+
+       tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
+                                            &sem->count);
+       if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+               if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+                       return -EINTR;
+       rwsem_set_owner(sem);
+       return 0;
+}
+
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+       long tmp;
+
+       lockevent_inc(rwsem_wtrylock);
+       tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
+                     RWSEM_ACTIVE_WRITE_BIAS);
+       if (tmp == RWSEM_UNLOCKED_VALUE) {
+               rwsem_set_owner(sem);
+               return true;
+       }
+       return false;
+}
+
+/*
+ * unlock after reading
+ */
+static inline void __up_read(struct rw_semaphore *sem)
+{
+       long tmp;
+
+       DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED),
+                               sem);
+       rwsem_clear_reader_owned(sem);
+       tmp = atomic_long_dec_return_release(&sem->count);
+       if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
+               rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void __up_write(struct rw_semaphore *sem)
+{
+       DEBUG_RWSEMS_WARN_ON(sem->owner != current, sem);
+       rwsem_clear_owner(sem);
+       if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
+                                                   &sem->count) < 0))
+               rwsem_wake(sem);
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+       long tmp;
+
+       /*
+        * When downgrading from exclusive to shared ownership,
+        * anything inside the write-locked region cannot leak
+        * into the read side. In contrast, anything in the
+        * read-locked region is ok to be re-ordered into the
+        * write side. As such, rely on RELEASE semantics.
+        */
+       DEBUG_RWSEMS_WARN_ON(sem->owner != current, sem);
+       tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
+       rwsem_set_reader_owned(sem);
+       if (tmp < 0)
+               rwsem_downgrade_wake(sem);
+}
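The bias constants moved into rwsem.h encode the whole lock state in one signed word: every active reader or writer adds ACTIVE_BIAS to the low half (selected by ACTIVE_MASK), while each queued waiter subtracts the large WAITING_BIAS, so any contended count is negative. A worked example of the 64-bit arithmetic, runnable on an LP64 host:

    #include <assert.h>

    #define ACTIVE_MASK   0xffffffffL
    #define ACTIVE_BIAS   0x00000001L
    #define WAITING_BIAS  (-ACTIVE_MASK - 1)             /* -0x100000000 */
    #define WRITE_BIAS    (WAITING_BIAS + ACTIVE_BIAS)

    int main(void)
    {
            long count = 0;                 /* unlocked */

            count += ACTIVE_BIAS;           /* reader fast path: count stays > 0 */
            assert(count == 1);
            count -= ACTIVE_BIAS;           /* reader release */

            count += WRITE_BIAS;            /* uncontended write lock */
            assert(count == WRITE_BIAS && (count & ACTIVE_MASK) == 1);
            count -= WRITE_BIAS;            /* write release */

            count += WAITING_BIAS;          /* a waiter queues up */
            count += ACTIVE_BIAS;           /* next reader sees count <= 0: slowpath */
            assert(count < 0);
            return 0;
    }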
index f8fe57d1022e368b0ece751ba8c0140f66c965d8..9bbaaab14b36efc189c53152bb4a418872b5fe17 100644 (file)
@@ -114,6 +114,15 @@ config PM_SLEEP_SMP
        depends on PM_SLEEP
        select HOTPLUG_CPU
 
+config PM_SLEEP_SMP_NONZERO_CPU
+       def_bool y
+       depends on PM_SLEEP_SMP
+       depends on ARCH_SUSPEND_NONZERO_CPU
+       ---help---
	If an arch can suspend (for suspend, hibernate, kexec, etc.) on a
	non-zero numbered CPU, it may define ARCH_SUSPEND_NONZERO_CPU. This
	will allow the nohz_full mask to include CPU0.
+
 config PM_AUTOSLEEP
        bool "Opportunistic sleep"
        depends on PM_SLEEP
index abef759de7c8fb4a8ece278fd7b7730d5b5e41ab..cfc7a57049e4c90367c9421913e8698055fa0e6c 100644 (file)
@@ -281,7 +281,7 @@ static int create_image(int platform_mode)
        if (error || hibernation_test(TEST_PLATFORM))
                goto Platform_finish;
 
-       error = disable_nonboot_cpus();
+       error = suspend_disable_secondary_cpus();
        if (error || hibernation_test(TEST_CPUS))
                goto Enable_cpus;
 
@@ -323,7 +323,7 @@ static int create_image(int platform_mode)
        local_irq_enable();
 
  Enable_cpus:
-       enable_nonboot_cpus();
+       suspend_enable_secondary_cpus();
 
  Platform_finish:
        platform_finish(platform_mode);
@@ -417,7 +417,7 @@ int hibernation_snapshot(int platform_mode)
 
 int __weak hibernate_resume_nonboot_cpu_disable(void)
 {
-       return disable_nonboot_cpus();
+       return suspend_disable_secondary_cpus();
 }
 
 /**
@@ -486,7 +486,7 @@ static int resume_target_kernel(bool platform_mode)
        local_irq_enable();
 
  Enable_cpus:
-       enable_nonboot_cpus();
+       suspend_enable_secondary_cpus();
 
  Cleanup:
        platform_restore_cleanup(platform_mode);
@@ -564,7 +564,7 @@ int hibernation_platform_enter(void)
        if (error)
                goto Platform_finish;
 
-       error = disable_nonboot_cpus();
+       error = suspend_disable_secondary_cpus();
        if (error)
                goto Enable_cpus;
 
@@ -586,7 +586,7 @@ int hibernation_platform_enter(void)
        local_irq_enable();
 
  Enable_cpus:
-       enable_nonboot_cpus();
+       suspend_enable_secondary_cpus();
 
  Platform_finish:
        hibernation_ops->finish();
index 0bd595a0b6103c56439871b765ef0d4fb5ac672b..59b6def230462cd987d22563e34420793acb90bf 100644 (file)
@@ -428,7 +428,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
        if (suspend_test(TEST_PLATFORM))
                goto Platform_wake;
 
-       error = disable_nonboot_cpus();
+       error = suspend_disable_secondary_cpus();
        if (error || suspend_test(TEST_CPUS))
                goto Enable_cpus;
 
@@ -458,7 +458,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
        BUG_ON(irqs_disabled());
 
  Enable_cpus:
-       enable_nonboot_cpus();
+       suspend_enable_secondary_cpus();
 
  Platform_wake:
        platform_resume_noirq(state);
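The call-site rename throughout hibernate.c and suspend.c is mechanical, but the new helpers stop hardwiring CPU0 as the surviving CPU: with the PM_SLEEP_SMP_NONZERO_CPU option added above, any housekeeping CPU may stay online across suspend. A plausible shape of the pair (the real definitions belong in include/linux/cpu.h):

    static inline int suspend_disable_secondary_cpus(void)
    {
            int cpu = 0;

            if (IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU))
                    cpu = -1;       /* any housekeeping CPU may stay online */

            return freeze_secondary_cpus(cpu);
    }

    static inline void suspend_enable_secondary_cpus(void)
    {
            enable_nonboot_cpus();
    }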
index 771e93f9c43f826270c1927665fa5d6aaa8654e7..6f357f4fc85900db94f5a9dc45b098849e0e158d 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/hw_breakpoint.h>
 #include <linux/cn_proc.h>
 #include <linux/compat.h>
+#include <linux/sched/signal.h>
 
 /*
  * Access another process' address space via ptrace.
@@ -924,18 +925,26 @@ int ptrace_request(struct task_struct *child, long request,
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;
 
-       case PTRACE_GETSIGMASK:
+       case PTRACE_GETSIGMASK: {
+               sigset_t *mask;
+
                if (addr != sizeof(sigset_t)) {
                        ret = -EINVAL;
                        break;
                }
 
-               if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
+               if (test_tsk_restore_sigmask(child))
+                       mask = &child->saved_sigmask;
+               else
+                       mask = &child->blocked;
+
+               if (copy_to_user(datavp, mask, sizeof(sigset_t)))
                        ret = -EFAULT;
                else
                        ret = 0;
 
                break;
+       }
 
        case PTRACE_SETSIGMASK: {
                sigset_t new_set;
@@ -961,6 +970,8 @@ int ptrace_request(struct task_struct *child, long request,
                child->blocked = new_set;
                spin_unlock_irq(&child->sighand->siglock);
 
+               clear_tsk_restore_sigmask(child);
+
                ret = 0;
                break;
        }
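After the hunks above, PTRACE_GETSIGMASK reports saved_sigmask whenever the tracee has its restore-sigmask flag set, i.e. it is parked in sigsuspend()/ppoll() and ->blocked only holds the temporary mask, and PTRACE_SETSIGMASK clears that flag so an injected mask is not silently overwritten on syscall return. A tracer-side sketch for x86-64, where the kernel sigset is a single 64-bit word; note that addr must carry the kernel's sigset size, not the 128-byte glibc sigset_t:

    #include <signal.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    static int dump_sigmask(pid_t pid)   /* pid: an attached, stopped tracee */
    {
            unsigned long mask;          /* kernel sigset_t: 64 bits on x86-64 */

            if (ptrace(PTRACE_GETSIGMASK, pid,
                       (void *)sizeof(mask), &mask) == -1) {
                    perror("PTRACE_GETSIGMASK");
                    return -1;
            }
            printf("SIGUSR1 is %sblocked\n",
                   (mask & (1UL << (SIGUSR1 - 1))) ? "" : "not ");
            return 0;
    }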
index acee72c0b24b59945000cf1617e37895181c4036..4b58c907b4b7f416c76c8f6ac718c406d978570d 100644 (file)
@@ -233,6 +233,7 @@ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
 #ifdef CONFIG_RCU_STALL_COMMON
 
 extern int rcu_cpu_stall_suppress;
+extern int rcu_cpu_stall_timeout;
 int rcu_jiffies_till_stall_check(void);
 
 #define rcu_ftrace_dump_stall_suppress() \
index c29761152874471bd42f696e3e320304bf7b6f91..7a6890b23c5f545134607ed8897b841b841ee3bd 100644 (file)
@@ -494,6 +494,10 @@ rcu_perf_cleanup(void)
 
        if (torture_cleanup_begin())
                return;
+       if (!cur_ops) {
+               torture_cleanup_end();
+               return;
+       }
 
        if (reader_tasks) {
                for (i = 0; i < nrealreaders; i++)
@@ -614,6 +618,7 @@ rcu_perf_init(void)
                pr_cont("\n");
                WARN_ON(!IS_MODULE(CONFIG_RCU_PERF_TEST));
                firsterr = -EINVAL;
+               cur_ops = NULL;
                goto unwind;
        }
        if (cur_ops->init)
index f14d1b18a74fce92f987f70374965e1235b93a85..efaa5b3f4d3f0dc2faff7c4d8a1a5a5757ed2ff2 100644 (file)
@@ -299,7 +299,6 @@ struct rcu_torture_ops {
        int irq_capable;
        int can_boost;
        int extendables;
-       int ext_irq_conflict;
        const char *name;
 };
 
@@ -592,12 +591,7 @@ static void srcu_torture_init(void)
 
 static void srcu_torture_cleanup(void)
 {
-       static DEFINE_TORTURE_RANDOM(rand);
-
-       if (torture_random(&rand) & 0x800)
-               cleanup_srcu_struct(&srcu_ctld);
-       else
-               cleanup_srcu_struct_quiesced(&srcu_ctld);
+       cleanup_srcu_struct(&srcu_ctld);
        srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
 }
 
@@ -1160,7 +1154,7 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
        unsigned long randmask2 = randmask1 >> 3;
 
        WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
-       /* Most of the time lots of bits, half the time only one bit. */
+       /* Mostly only one bit (need preemption!), sometimes lots of bits. */
        if (!(randmask1 & 0x7))
                mask = mask & randmask2;
        else
@@ -1170,10 +1164,6 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
            ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
             (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
                mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
-       if ((mask & RCUTORTURE_RDR_IRQ) &&
-           !(mask & cur_ops->ext_irq_conflict) &&
-           (oldmask & cur_ops->ext_irq_conflict))
-               mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */
        return mask ?: RCUTORTURE_RDR_RCU;
 }
 
@@ -1848,7 +1838,7 @@ static int rcutorture_oom_notify(struct notifier_block *self,
        WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
             __func__);
        rcu_torture_fwd_cb_hist();
-       rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat) / 2));
+       rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat)) / 2);
        WRITE_ONCE(rcu_fwd_emergency_stop, true);
        smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
        pr_info("%s: Freed %lu RCU callbacks.\n",
@@ -2094,6 +2084,10 @@ rcu_torture_cleanup(void)
                        cur_ops->cb_barrier();
                return;
        }
+       if (!cur_ops) {
+               torture_cleanup_end();
+               return;
+       }
 
        rcu_torture_barrier_cleanup();
        torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
@@ -2267,6 +2261,7 @@ rcu_torture_init(void)
                pr_cont("\n");
                WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
                firsterr = -EINVAL;
+               cur_ops = NULL;
                goto unwind;
        }
        if (cur_ops->fqs == NULL && fqs_duration != 0) {
index 5d4a39a6505a43aa9c1c4b11f629be7922113535..44d6606b83257acde72fe92435de6245fe77131e 100644 (file)
@@ -76,19 +76,16 @@ EXPORT_SYMBOL_GPL(init_srcu_struct);
  * Must invoke this after you are finished using a given srcu_struct that
  * was initialized via init_srcu_struct(), else you leak memory.
  */
-void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
+void cleanup_srcu_struct(struct srcu_struct *ssp)
 {
        WARN_ON(ssp->srcu_lock_nesting[0] || ssp->srcu_lock_nesting[1]);
-       if (quiesced)
-               WARN_ON(work_pending(&ssp->srcu_work));
-       else
-               flush_work(&ssp->srcu_work);
+       flush_work(&ssp->srcu_work);
        WARN_ON(ssp->srcu_gp_running);
        WARN_ON(ssp->srcu_gp_waiting);
        WARN_ON(ssp->srcu_cb_head);
        WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail);
 }
-EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
+EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
 /*
  * Removes the count for the old reader from the appropriate element of
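With the quiesced variant gone, callers must themselves guarantee that an srcu_struct is idle before teardown: no readers in flight, no further call_srcu() invocations possible, and all queued callbacks drained, which is what srcu_barrier() provides. The expected lifecycle in outline (my_srcu and my_exit are illustrative names, not from this patch):

    static struct srcu_struct my_srcu;      /* paired with init_srcu_struct() */

    static void my_exit(void)
    {
            /* Step 1: ensure no further call_srcu() or readers are possible. */
            /* Step 2: wait out callbacks already queued via call_srcu():     */
            srcu_barrier(&my_srcu);
            /* Step 3: tear down; WARNs and leaks if anything is still live:  */
            cleanup_srcu_struct(&my_srcu);
    }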
index a60b8ba9e1aca68d27731606f25f02b14ddcdbb9..9b761e546de8ce522443665fbc01263f4d8202e3 100644 (file)
@@ -360,8 +360,14 @@ static unsigned long srcu_get_delay(struct srcu_struct *ssp)
        return SRCU_INTERVAL;
 }
 
-/* Helper for cleanup_srcu_struct() and cleanup_srcu_struct_quiesced(). */
-void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
+/**
+ * cleanup_srcu_struct - deconstruct a sleep-RCU structure
+ * @ssp: structure to clean up.
+ *
+ * Must invoke this after you are finished using a given srcu_struct that
+ * was initialized via init_srcu_struct(), else you leak memory.
+ */
+void cleanup_srcu_struct(struct srcu_struct *ssp)
 {
        int cpu;
 
@@ -369,24 +375,14 @@ void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
                return; /* Just leak it! */
        if (WARN_ON(srcu_readers_active(ssp)))
                return; /* Just leak it! */
-       if (quiesced) {
-               if (WARN_ON(delayed_work_pending(&ssp->work)))
-                       return; /* Just leak it! */
-       } else {
-               flush_delayed_work(&ssp->work);
-       }
+       flush_delayed_work(&ssp->work);
        for_each_possible_cpu(cpu) {
                struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
 
-               if (quiesced) {
-                       if (WARN_ON(timer_pending(&sdp->delay_work)))
-                               return; /* Just leak it! */
-                       if (WARN_ON(work_pending(&sdp->work)))
-                               return; /* Just leak it! */
-               } else {
-                       del_timer_sync(&sdp->delay_work);
-                       flush_work(&sdp->work);
-               }
+               del_timer_sync(&sdp->delay_work);
+               flush_work(&sdp->work);
+               if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
+                       return; /* Forgot srcu_barrier(), so just leak it! */
        }
        if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
            WARN_ON(srcu_readers_active(ssp))) {
@@ -397,7 +393,7 @@ void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
        free_percpu(ssp->sda);
        ssp->sda = NULL;
 }
-EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
+EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
 /*
  * Counts the new reader in the appropriate per-CPU element of the
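Both SRCU variants above collapse _cleanup_srcu_struct(ssp, quiesced) back into a single cleanup_srcu_struct() entry point, and the tree variant gains a WARN_ON for callbacks still queued at teardown. A hedged sketch of the resulting lifecycle for a module-owned srcu_struct (illustrative, not taken from the patch):

#include <linux/module.h>
#include <linux/srcu.h>

static struct srcu_struct my_srcu;	/* hypothetical user */

static int __init my_init(void)
{
	return init_srcu_struct(&my_srcu);
}

static void __exit my_exit(void)
{
	/* Wait for any in-flight call_srcu() callbacks first; the new
	 * WARN_ON(rcu_segcblist_n_cbs(...)) fires if this is forgotten. */
	srcu_barrier(&my_srcu);
	/* cleanup_srcu_struct() now always flushes pending work itself,
	 * so the "quiesced" fast path is no longer needed. */
	cleanup_srcu_struct(&my_srcu);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");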
index 911bd9076d435fbac81c1a6f30000cc1bcbcb339..477b4eb44af5c9ea48c639f3ccf88156d54520b1 100644 (file)
@@ -52,7 +52,7 @@ void rcu_qs(void)
        local_irq_save(flags);
        if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
                rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
-               raise_softirq(RCU_SOFTIRQ);
+               raise_softirq_irqoff(RCU_SOFTIRQ);
        }
        local_irq_restore(flags);
 }
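The Tiny-RCU hunk above is subtle but safe: raise_softirq() saves and restores interrupt state internally, while rcu_qs() already runs under local_irq_save(), so switching to raise_softirq_irqoff() simply drops a redundant save/restore on that path. The general pattern, sketched:

	unsigned long flags;

	local_irq_save(flags);
	/* ... updates that must not race with interrupts ... */
	raise_softirq_irqoff(RCU_SOFTIRQ);	/* irqs known to be off here */
	local_irq_restore(flags);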
index acd6ccf56faf9ff090b43ea08458b3380f22cc3d..ec77ec336f582ac3379ce667d42c89ec70f236b2 100644 (file)
@@ -102,11 +102,6 @@ int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
 /* Number of rcu_nodes at specified level. */
 int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
-/* panic() on RCU Stall sysctl. */
-int sysctl_panic_on_rcu_stall __read_mostly;
-/* Commandeer a sysrq key to dump RCU's tree. */
-static bool sysrq_rcu;
-module_param(sysrq_rcu, bool, 0444);
 
 /*
  * The rcu_scheduler_active variable is initialized to the value
@@ -149,7 +144,7 @@ static void sync_sched_exp_online_cleanup(int cpu);
 
 /* rcuc/rcub kthread realtime priority */
 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
-module_param(kthread_prio, int, 0644);
+module_param(kthread_prio, int, 0444);
 
 /* Delay in jiffies for grace-period initialization delays, debug only. */
 
@@ -406,7 +401,7 @@ static bool rcu_kick_kthreads;
  */
 static ulong jiffies_till_sched_qs = ULONG_MAX;
 module_param(jiffies_till_sched_qs, ulong, 0444);
-static ulong jiffies_to_sched_qs; /* Adjusted version of above if not default */
+static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
 
 /*
@@ -424,6 +419,7 @@ static void adjust_jiffies_till_sched_qs(void)
                WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
                return;
        }
+       /* Otherwise, set to third fqs scan, but bound below on large systems. */
        j = READ_ONCE(jiffies_till_first_fqs) +
                      2 * READ_ONCE(jiffies_till_next_fqs);
        if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
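For a sense of the numbers in adjust_jiffies_till_sched_qs(): with hypothetical settings HZ = 1000, jiffies_till_first_fqs = 333 and jiffies_till_next_fqs = 333, the third-scan estimate is j = 333 + 2 * 333 = 999 jiffies. The lower bound is HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV; assuming RCU_JIFFIES_FQS_DIV is 256 as in kernel/rcu/rcu.h, a 512-CPU machine gives 100 + 2 = 102, so the 999-jiffy estimate stands.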
@@ -512,74 +508,6 @@ static const char *gp_state_getname(short gs)
        return gp_state_names[gs];
 }
 
-/*
- * Show the state of the grace-period kthreads.
- */
-void show_rcu_gp_kthreads(void)
-{
-       int cpu;
-       unsigned long j;
-       unsigned long ja;
-       unsigned long jr;
-       unsigned long jw;
-       struct rcu_data *rdp;
-       struct rcu_node *rnp;
-
-       j = jiffies;
-       ja = j - READ_ONCE(rcu_state.gp_activity);
-       jr = j - READ_ONCE(rcu_state.gp_req_activity);
-       jw = j - READ_ONCE(rcu_state.gp_wake_time);
-       pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
-               rcu_state.name, gp_state_getname(rcu_state.gp_state),
-               rcu_state.gp_state,
-               rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL,
-               ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq),
-               (long)READ_ONCE(rcu_state.gp_seq),
-               (long)READ_ONCE(rcu_get_root()->gp_seq_needed),
-               READ_ONCE(rcu_state.gp_flags));
-       rcu_for_each_node_breadth_first(rnp) {
-               if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
-                       continue;
-               pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
-                       rnp->grplo, rnp->grphi, (long)rnp->gp_seq,
-                       (long)rnp->gp_seq_needed);
-               if (!rcu_is_leaf_node(rnp))
-                       continue;
-               for_each_leaf_node_possible_cpu(rnp, cpu) {
-                       rdp = per_cpu_ptr(&rcu_data, cpu);
-                       if (rdp->gpwrap ||
-                           ULONG_CMP_GE(rcu_state.gp_seq,
-                                        rdp->gp_seq_needed))
-                               continue;
-                       pr_info("\tcpu %d ->gp_seq_needed %ld\n",
-                               cpu, (long)rdp->gp_seq_needed);
-               }
-       }
-       /* sched_show_task(rcu_state.gp_kthread); */
-}
-EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
-
-/* Dump grace-period-request information due to commandeered sysrq. */
-static void sysrq_show_rcu(int key)
-{
-       show_rcu_gp_kthreads();
-}
-
-static struct sysrq_key_op sysrq_rcudump_op = {
-       .handler = sysrq_show_rcu,
-       .help_msg = "show-rcu(y)",
-       .action_msg = "Show RCU tree",
-       .enable_mask = SYSRQ_ENABLE_DUMP,
-};
-
-static int __init rcu_sysrq_init(void)
-{
-       if (sysrq_rcu)
-               return register_sysrq_key('y', &sysrq_rcudump_op);
-       return 0;
-}
-early_initcall(rcu_sysrq_init);
-
 /*
  * Send along grace-period-related data for rcutorture diagnostics.
  */
@@ -1033,27 +961,6 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
        return 0;
 }
 
-/*
- * Handler for the irq_work request posted when a grace period has
- * gone on for too long, but not yet long enough for an RCU CPU
- * stall warning.  Set state appropriately, but just complain if
- * there is unexpected state on entry.
- */
-static void rcu_iw_handler(struct irq_work *iwp)
-{
-       struct rcu_data *rdp;
-       struct rcu_node *rnp;
-
-       rdp = container_of(iwp, struct rcu_data, rcu_iw);
-       rnp = rdp->mynode;
-       raw_spin_lock_rcu_node(rnp);
-       if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
-               rdp->rcu_iw_gp_seq = rnp->gp_seq;
-               rdp->rcu_iw_pending = false;
-       }
-       raw_spin_unlock_rcu_node(rnp);
-}
-
 /*
  * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
@@ -1167,295 +1074,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
        return 0;
 }
 
-static void record_gp_stall_check_time(void)
-{
-       unsigned long j = jiffies;
-       unsigned long j1;
-
-       rcu_state.gp_start = j;
-       j1 = rcu_jiffies_till_stall_check();
-       /* Record ->gp_start before ->jiffies_stall. */
-       smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */
-       rcu_state.jiffies_resched = j + j1 / 2;
-       rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
-}
-
-/*
- * Complain about starvation of grace-period kthread.
- */
-static void rcu_check_gp_kthread_starvation(void)
-{
-       struct task_struct *gpk = rcu_state.gp_kthread;
-       unsigned long j;
-
-       j = jiffies - READ_ONCE(rcu_state.gp_activity);
-       if (j > 2 * HZ) {
-               pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
-                      rcu_state.name, j,
-                      (long)rcu_seq_current(&rcu_state.gp_seq),
-                      READ_ONCE(rcu_state.gp_flags),
-                      gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
-                      gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
-               if (gpk) {
-                       pr_err("RCU grace-period kthread stack dump:\n");
-                       sched_show_task(gpk);
-                       wake_up_process(gpk);
-               }
-       }
-}
-
-/*
- * Dump stacks of all tasks running on stalled CPUs.  First try using
- * NMIs, but fall back to manual remote stack tracing on architectures
- * that don't support NMI-based stack dumps.  The NMI-triggered stack
- * traces are more accurate because they are printed by the target CPU.
- */
-static void rcu_dump_cpu_stacks(void)
-{
-       int cpu;
-       unsigned long flags;
-       struct rcu_node *rnp;
-
-       rcu_for_each_leaf_node(rnp) {
-               raw_spin_lock_irqsave_rcu_node(rnp, flags);
-               for_each_leaf_node_possible_cpu(rnp, cpu)
-                       if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
-                               if (!trigger_single_cpu_backtrace(cpu))
-                                       dump_cpu_task(cpu);
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-       }
-}
-
-/*
- * If too much time has passed in the current grace period, and if
- * so configured, go kick the relevant kthreads.
- */
-static void rcu_stall_kick_kthreads(void)
-{
-       unsigned long j;
-
-       if (!rcu_kick_kthreads)
-               return;
-       j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
-       if (time_after(jiffies, j) && rcu_state.gp_kthread &&
-           (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
-               WARN_ONCE(1, "Kicking %s grace-period kthread\n",
-                         rcu_state.name);
-               rcu_ftrace_dump(DUMP_ALL);
-               wake_up_process(rcu_state.gp_kthread);
-               WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
-       }
-}
-
-static void panic_on_rcu_stall(void)
-{
-       if (sysctl_panic_on_rcu_stall)
-               panic("RCU Stall\n");
-}
-
-static void print_other_cpu_stall(unsigned long gp_seq)
-{
-       int cpu;
-       unsigned long flags;
-       unsigned long gpa;
-       unsigned long j;
-       int ndetected = 0;
-       struct rcu_node *rnp = rcu_get_root();
-       long totqlen = 0;
-
-       /* Kick and suppress, if so configured. */
-       rcu_stall_kick_kthreads();
-       if (rcu_cpu_stall_suppress)
-               return;
-
-       /*
-        * OK, time to rat on our buddy...
-        * See Documentation/RCU/stallwarn.txt for info on how to debug
-        * RCU CPU stall warnings.
-        */
-       pr_err("INFO: %s detected stalls on CPUs/tasks:", rcu_state.name);
-       print_cpu_stall_info_begin();
-       rcu_for_each_leaf_node(rnp) {
-               raw_spin_lock_irqsave_rcu_node(rnp, flags);
-               ndetected += rcu_print_task_stall(rnp);
-               if (rnp->qsmask != 0) {
-                       for_each_leaf_node_possible_cpu(rnp, cpu)
-                               if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
-                                       print_cpu_stall_info(cpu);
-                                       ndetected++;
-                               }
-               }
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-       }
-
-       print_cpu_stall_info_end();
-       for_each_possible_cpu(cpu)
-               totqlen += rcu_get_n_cbs_cpu(cpu);
-       pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
-              smp_processor_id(), (long)(jiffies - rcu_state.gp_start),
-              (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
-       if (ndetected) {
-               rcu_dump_cpu_stacks();
-
-               /* Complain about tasks blocking the grace period. */
-               rcu_print_detail_task_stall();
-       } else {
-               if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
-                       pr_err("INFO: Stall ended before state dump start\n");
-               } else {
-                       j = jiffies;
-                       gpa = READ_ONCE(rcu_state.gp_activity);
-                       pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
-                              rcu_state.name, j - gpa, j, gpa,
-                              READ_ONCE(jiffies_till_next_fqs),
-                              rcu_get_root()->qsmask);
-                       /* In this case, the current CPU might be at fault. */
-                       sched_show_task(current);
-               }
-       }
-       /* Rewrite if needed in case of slow consoles. */
-       if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
-               WRITE_ONCE(rcu_state.jiffies_stall,
-                          jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-
-       rcu_check_gp_kthread_starvation();
-
-       panic_on_rcu_stall();
-
-       rcu_force_quiescent_state();  /* Kick them all. */
-}
-
-static void print_cpu_stall(void)
-{
-       int cpu;
-       unsigned long flags;
-       struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
-       struct rcu_node *rnp = rcu_get_root();
-       long totqlen = 0;
-
-       /* Kick and suppress, if so configured. */
-       rcu_stall_kick_kthreads();
-       if (rcu_cpu_stall_suppress)
-               return;
-
-       /*
-        * OK, time to rat on ourselves...
-        * See Documentation/RCU/stallwarn.txt for info on how to debug
-        * RCU CPU stall warnings.
-        */
-       pr_err("INFO: %s self-detected stall on CPU", rcu_state.name);
-       print_cpu_stall_info_begin();
-       raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
-       print_cpu_stall_info(smp_processor_id());
-       raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
-       print_cpu_stall_info_end();
-       for_each_possible_cpu(cpu)
-               totqlen += rcu_get_n_cbs_cpu(cpu);
-       pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n",
-               jiffies - rcu_state.gp_start,
-               (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
-
-       rcu_check_gp_kthread_starvation();
-
-       rcu_dump_cpu_stacks();
-
-       raw_spin_lock_irqsave_rcu_node(rnp, flags);
-       /* Rewrite if needed in case of slow consoles. */
-       if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
-               WRITE_ONCE(rcu_state.jiffies_stall,
-                          jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-
-       panic_on_rcu_stall();
-
-       /*
-        * Attempt to revive the RCU machinery by forcing a context switch.
-        *
-        * A context switch would normally allow the RCU state machine to make
-        * progress and it could be we're stuck in kernel space without context
-        * switches for an entirely unreasonable amount of time.
-        */
-       set_tsk_need_resched(current);
-       set_preempt_need_resched();
-}
-
-static void check_cpu_stall(struct rcu_data *rdp)
-{
-       unsigned long gs1;
-       unsigned long gs2;
-       unsigned long gps;
-       unsigned long j;
-       unsigned long jn;
-       unsigned long js;
-       struct rcu_node *rnp;
-
-       if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
-           !rcu_gp_in_progress())
-               return;
-       rcu_stall_kick_kthreads();
-       j = jiffies;
-
-       /*
-        * Lots of memory barriers to reject false positives.
-        *
-        * The idea is to pick up rcu_state.gp_seq, then
-        * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
-        * another copy of rcu_state.gp_seq.  These values are updated in
-        * the opposite order with memory barriers (or equivalent) during
-        * grace-period initialization and cleanup.  Now, a false positive
-        * can occur if we get an new value of rcu_state.gp_start and a old
-        * value of rcu_state.jiffies_stall.  But given the memory barriers,
-        * the only way that this can happen is if one grace period ends
-        * and another starts between these two fetches.  This is detected
-        * by comparing the second fetch of rcu_state.gp_seq with the
-        * previous fetch from rcu_state.gp_seq.
-        *
-        * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
-        * and rcu_state.gp_start suffice to forestall false positives.
-        */
-       gs1 = READ_ONCE(rcu_state.gp_seq);
-       smp_rmb(); /* Pick up ->gp_seq first... */
-       js = READ_ONCE(rcu_state.jiffies_stall);
-       smp_rmb(); /* ...then ->jiffies_stall before the rest... */
-       gps = READ_ONCE(rcu_state.gp_start);
-       smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
-       gs2 = READ_ONCE(rcu_state.gp_seq);
-       if (gs1 != gs2 ||
-           ULONG_CMP_LT(j, js) ||
-           ULONG_CMP_GE(gps, js))
-               return; /* No stall or GP completed since entering function. */
-       rnp = rdp->mynode;
-       jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
-       if (rcu_gp_in_progress() &&
-           (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
-           cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
-
-               /* We haven't checked in, so go dump stack. */
-               print_cpu_stall();
-
-       } else if (rcu_gp_in_progress() &&
-                  ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
-                  cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
-
-               /* They had a few time units to dump stack, so complain. */
-               print_other_cpu_stall(gs2);
-       }
-}
-
-/**
- * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
- *
- * Set the stall-warning timeout way off into the future, thus preventing
- * any RCU CPU stall-warning messages from appearing in the current set of
- * RCU grace periods.
- *
- * The caller must disable hard irqs.
- */
-void rcu_cpu_stall_reset(void)
-{
-       WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
-}
-
 /* Trace-event wrapper function for trace_rcu_future_grace_period.  */
 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
                              unsigned long gp_seq_req, const char *s)
@@ -1585,7 +1203,7 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
 static void rcu_gp_kthread_wake(void)
 {
        if ((current == rcu_state.gp_kthread &&
-            !in_interrupt() && !in_serving_softirq()) ||
+            !in_irq() && !in_serving_softirq()) ||
            !READ_ONCE(rcu_state.gp_flags) ||
            !rcu_state.gp_kthread)
                return;
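The in_irq() change above narrows the "is the grace-period kthread running in ordinary task context?" test. For reference (linux/preempt.h semantics): in_irq() is true only inside a hardirq handler, in_serving_softirq() only while a softirq handler is actually executing, and in_interrupt() additionally in merely-BH-disabled regions, which are still task context. A comment-style summary of the apparent intent, as a sketch:

	/*
	 * current == rcu_state.gp_kthread && !in_irq() && !in_serving_softirq()
	 * means the GP kthread itself reached this point in task context,
	 * where a self-wakeup is pointless.  Testing in_interrupt() instead
	 * would also return true in BH-disabled task context, defeating the
	 * shortcut and issuing needless self-wakeups.
	 */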
@@ -2295,11 +1913,10 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
                return;
        }
        mask = rdp->grpmask;
+       rdp->core_needs_qs = false;
        if ((rnp->qsmask & mask) == 0) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        } else {
-               rdp->core_needs_qs = false;
-
                /*
                 * This GP can't end until cpu checks in, so all of our
                 * callbacks can be processed during the next GP.
@@ -2548,11 +2165,11 @@ void rcu_sched_clock_irq(int user)
 }
 
 /*
- * Scan the leaf rcu_node structures, processing dyntick state for any that
- * have not yet encountered a quiescent state, using the function specified.
- * Also initiate boosting for any threads blocked on the root rcu_node.
- *
- * The caller must have suppressed start of new grace periods.
+ * Scan the leaf rcu_node structures.  For each structure on which all
+ * CPUs have reported a quiescent state and on which there are tasks
+ * blocking the current grace period, initiate RCU priority boosting.
+ * Otherwise, invoke the specified function to check dyntick state for
+ * each CPU that has not yet reported a quiescent state.
  */
 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
 {
@@ -2635,101 +2252,6 @@ void rcu_force_quiescent_state(void)
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
-/*
- * This function checks for grace-period requests that fail to motivate
- * RCU to come out of its idle mode.
- */
-void
-rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
-                        const unsigned long gpssdelay)
-{
-       unsigned long flags;
-       unsigned long j;
-       struct rcu_node *rnp_root = rcu_get_root();
-       static atomic_t warned = ATOMIC_INIT(0);
-
-       if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
-           ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
-               return;
-       j = jiffies; /* Expensive access, and in common case don't get here. */
-       if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
-           time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
-           atomic_read(&warned))
-               return;
-
-       raw_spin_lock_irqsave_rcu_node(rnp, flags);
-       j = jiffies;
-       if (rcu_gp_in_progress() ||
-           ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
-           time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
-           time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
-           atomic_read(&warned)) {
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-               return;
-       }
-       /* Hold onto the leaf lock to make others see warned==1. */
-
-       if (rnp_root != rnp)
-               raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
-       j = jiffies;
-       if (rcu_gp_in_progress() ||
-           ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
-           time_before(j, rcu_state.gp_req_activity + gpssdelay) ||
-           time_before(j, rcu_state.gp_activity + gpssdelay) ||
-           atomic_xchg(&warned, 1)) {
-               raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-               return;
-       }
-       WARN_ON(1);
-       if (rnp_root != rnp)
-               raw_spin_unlock_rcu_node(rnp_root);
-       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-       show_rcu_gp_kthreads();
-}
-
-/*
- * Do a forward-progress check for rcutorture.  This is normally invoked
- * due to an OOM event.  The argument "j" gives the time period during
- * which rcutorture would like progress to have been made.
- */
-void rcu_fwd_progress_check(unsigned long j)
-{
-       unsigned long cbs;
-       int cpu;
-       unsigned long max_cbs = 0;
-       int max_cpu = -1;
-       struct rcu_data *rdp;
-
-       if (rcu_gp_in_progress()) {
-               pr_info("%s: GP age %lu jiffies\n",
-                       __func__, jiffies - rcu_state.gp_start);
-               show_rcu_gp_kthreads();
-       } else {
-               pr_info("%s: Last GP end %lu jiffies ago\n",
-                       __func__, jiffies - rcu_state.gp_end);
-               preempt_disable();
-               rdp = this_cpu_ptr(&rcu_data);
-               rcu_check_gp_start_stall(rdp->mynode, rdp, j);
-               preempt_enable();
-       }
-       for_each_possible_cpu(cpu) {
-               cbs = rcu_get_n_cbs_cpu(cpu);
-               if (!cbs)
-                       continue;
-               if (max_cpu < 0)
-                       pr_info("%s: callbacks", __func__);
-               pr_cont(" %d: %lu", cpu, cbs);
-               if (cbs <= max_cbs)
-                       continue;
-               max_cbs = cbs;
-               max_cpu = cpu;
-       }
-       if (max_cpu >= 0)
-               pr_cont("\n");
-}
-EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);
-
 /* Perform RCU core processing work for the current CPU.  */
 static __latent_entropy void rcu_core(struct softirq_action *unused)
 {
@@ -3559,13 +3081,11 @@ static int rcu_pm_notify(struct notifier_block *self,
        switch (action) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
-               if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
-                       rcu_expedite_gp();
+               rcu_expedite_gp();
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
-               if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
-                       rcu_unexpedite_gp();
+               rcu_unexpedite_gp();
                break;
        default:
                break;
@@ -3742,8 +3262,7 @@ static void __init rcu_init_geometry(void)
                jiffies_till_first_fqs = d;
        if (jiffies_till_next_fqs == ULONG_MAX)
                jiffies_till_next_fqs = d;
-       if (jiffies_till_sched_qs == ULONG_MAX)
-               adjust_jiffies_till_sched_qs();
+       adjust_jiffies_till_sched_qs();
 
        /* If the compile-time values are accurate, just leave. */
        if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
@@ -3858,5 +3377,6 @@ void __init rcu_init(void)
        srcu_init();
 }
 
+#include "tree_stall.h"
 #include "tree_exp.h"
 #include "tree_plugin.h"
index bb4f995f2d3f2786d602fafff9f01b54889eaa62..e253d11af3c496209354987c00697eacb8b6edd6 100644 (file)
@@ -393,15 +393,13 @@ static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
 
 int rcu_dynticks_snap(struct rcu_data *rdp);
 
-/* Forward declarations for rcutree_plugin.h */
+/* Forward declarations for tree_plugin.h */
 static void rcu_bootup_announce(void);
 static void rcu_qs(void);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
-static void rcu_print_detail_task_stall(void);
-static int rcu_print_task_stall(struct rcu_node *rnp);
 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
 static void rcu_flavor_sched_clock_irq(int user);
@@ -418,9 +416,6 @@ static void rcu_prepare_for_idle(void);
 static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
 static void rcu_preempt_deferred_qs(struct task_struct *t);
-static void print_cpu_stall_info_begin(void);
-static void print_cpu_stall_info(int cpu);
-static void print_cpu_stall_info_end(void);
 static void zero_cpu_stall_ticks(struct rcu_data *rdp);
 static bool rcu_nocb_cpu_needs_barrier(int cpu);
 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
@@ -445,3 +440,10 @@ static void rcu_bind_gp_kthread(void);
 static bool rcu_nohz_full_cpu(void);
 static void rcu_dynticks_task_enter(void);
 static void rcu_dynticks_task_exit(void);
+
+/* Forward declarations for tree_stall.h */
+static void record_gp_stall_check_time(void);
+static void rcu_iw_handler(struct irq_work *iwp);
+static void check_cpu_stall(struct rcu_data *rdp);
+static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
+                                    const unsigned long gpssdelay);
index 4c2a0189e74891c4a980667215e4dd8750de7bc4..9c990df880d113f2468ba118ca1979d391fcf0d8 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/lockdep.h>
 
 static void rcu_exp_handler(void *unused);
+static int rcu_print_task_exp_stall(struct rcu_node *rnp);
 
 /*
  * Record the start of an expedited grace period.
@@ -633,7 +634,7 @@ static void rcu_exp_handler(void *unused)
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmask & rdp->grpmask) {
                        rdp->deferred_qs = true;
-                       WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true);
+                       t->rcu_read_unlock_special.b.exp_hint = true;
                }
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
@@ -648,7 +649,7 @@ static void rcu_exp_handler(void *unused)
         *
         * If the CPU is fully enabled (or if some buggy RCU-preempt
         * read-side critical section is being used from idle), just
-        * invoke rcu_preempt_defer_qs() to immediately report the
+        * invoke rcu_preempt_deferred_qs() to immediately report the
         * quiescent state.  We cannot use rcu_read_unlock_special()
         * because we are in an interrupt handler, which will cause that
         * function to take an early exit without doing anything.
@@ -670,6 +671,27 @@ static void sync_sched_exp_online_cleanup(int cpu)
 {
 }
 
+/*
+ * Scan the current list of tasks blocked within RCU read-side critical
+ * sections, printing out the tid of each that is blocking the current
+ * expedited grace period.
+ */
+static int rcu_print_task_exp_stall(struct rcu_node *rnp)
+{
+       struct task_struct *t;
+       int ndetected = 0;
+
+       if (!rnp->exp_tasks)
+               return 0;
+       t = list_entry(rnp->exp_tasks->prev,
+                      struct task_struct, rcu_node_entry);
+       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
+               pr_cont(" P%d", t->pid);
+               ndetected++;
+       }
+       return ndetected;
+}
+
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
 /* Invoked on each online non-idle CPU for expedited quiescent state. */
@@ -709,6 +731,16 @@ static void sync_sched_exp_online_cleanup(int cpu)
        WARN_ON_ONCE(ret);
 }
 
+/*
+ * Because preemptible RCU does not exist, we never have to check for
+ * tasks blocked within RCU read-side critical sections that are
+ * blocking the current expedited grace period.
+ */
+static int rcu_print_task_exp_stall(struct rcu_node *rnp)
+{
+       return 0;
+}
+
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
 /**
index 97dba50f6fb24f01a150ad74935f53c2db542edd..1102765f91fd12ed7776f3f5cc88aefb743fe7ae 100644 (file)
@@ -285,7 +285,7 @@ static void rcu_qs(void)
                                       TPS("cpuqs"));
                __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
                barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */
-               current->rcu_read_unlock_special.b.need_qs = false;
+               WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false);
        }
 }
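The WRITE_ONCE() above marks a store to a flag that is also accessed from interrupt context, preventing the compiler from tearing, fusing, or inventing accesses and documenting the shared access for tools such as KCSAN. The usual pairing, sketched (the reader's context here is illustrative):

	/* Task context: quiescent state reached, clear the request. */
	WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false);

	/* Elsewhere, e.g. the scheduling-clock interrupt path: */
	if (READ_ONCE(t->rcu_read_unlock_special.b.need_qs))
		; /* ... a quiescent state is still needed ... */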
 
@@ -642,100 +642,6 @@ static void rcu_read_unlock_special(struct task_struct *t)
        rcu_preempt_deferred_qs_irqrestore(t, flags);
 }
 
-/*
- * Dump detailed information for all tasks blocking the current RCU
- * grace period on the specified rcu_node structure.
- */
-static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
-{
-       unsigned long flags;
-       struct task_struct *t;
-
-       raw_spin_lock_irqsave_rcu_node(rnp, flags);
-       if (!rcu_preempt_blocked_readers_cgp(rnp)) {
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-               return;
-       }
-       t = list_entry(rnp->gp_tasks->prev,
-                      struct task_struct, rcu_node_entry);
-       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
-               /*
-                * We could be printing a lot while holding a spinlock.
-                * Avoid triggering hard lockup.
-                */
-               touch_nmi_watchdog();
-               sched_show_task(t);
-       }
-       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-}
-
-/*
- * Dump detailed information for all tasks blocking the current RCU
- * grace period.
- */
-static void rcu_print_detail_task_stall(void)
-{
-       struct rcu_node *rnp = rcu_get_root();
-
-       rcu_print_detail_task_stall_rnp(rnp);
-       rcu_for_each_leaf_node(rnp)
-               rcu_print_detail_task_stall_rnp(rnp);
-}
-
-static void rcu_print_task_stall_begin(struct rcu_node *rnp)
-{
-       pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
-              rnp->level, rnp->grplo, rnp->grphi);
-}
-
-static void rcu_print_task_stall_end(void)
-{
-       pr_cont("\n");
-}
-
-/*
- * Scan the current list of tasks blocked within RCU read-side critical
- * sections, printing out the tid of each.
- */
-static int rcu_print_task_stall(struct rcu_node *rnp)
-{
-       struct task_struct *t;
-       int ndetected = 0;
-
-       if (!rcu_preempt_blocked_readers_cgp(rnp))
-               return 0;
-       rcu_print_task_stall_begin(rnp);
-       t = list_entry(rnp->gp_tasks->prev,
-                      struct task_struct, rcu_node_entry);
-       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
-               pr_cont(" P%d", t->pid);
-               ndetected++;
-       }
-       rcu_print_task_stall_end();
-       return ndetected;
-}
-
-/*
- * Scan the current list of tasks blocked within RCU read-side critical
- * sections, printing out the tid of each that is blocking the current
- * expedited grace period.
- */
-static int rcu_print_task_exp_stall(struct rcu_node *rnp)
-{
-       struct task_struct *t;
-       int ndetected = 0;
-
-       if (!rnp->exp_tasks)
-               return 0;
-       t = list_entry(rnp->exp_tasks->prev,
-                      struct task_struct, rcu_node_entry);
-       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
-               pr_cont(" P%d", t->pid);
-               ndetected++;
-       }
-       return ndetected;
-}
-
 /*
  * Check that the list of blocked tasks for the newly completed grace
  * period is in fact empty.  It is a serious bug to complete a grace
@@ -804,19 +710,25 @@ static void rcu_flavor_sched_clock_irq(int user)
 
 /*
  * Check for a task exiting while in a preemptible-RCU read-side
- * critical section, clean up if so.  No need to issue warnings,
- * as debug_check_no_locks_held() already does this if lockdep
- * is enabled.
+ * critical section, clean up if so.  No need to issue warnings, as
+ * debug_check_no_locks_held() already does this if lockdep is enabled.
+ * Besides, if this function does anything other than just immediately
+ * return, there was a bug of some sort.  Spewing warnings from this
+ * function is as likely as not to simply obscure important prior warnings.
  */
 void exit_rcu(void)
 {
        struct task_struct *t = current;
 
-       if (likely(list_empty(&current->rcu_node_entry)))
+       if (unlikely(!list_empty(&current->rcu_node_entry))) {
+               t->rcu_read_lock_nesting = 1;
+               barrier();
+               WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
+       } else if (unlikely(t->rcu_read_lock_nesting)) {
+               t->rcu_read_lock_nesting = 1;
+       } else {
                return;
-       t->rcu_read_lock_nesting = 1;
-       barrier();
-       t->rcu_read_unlock_special.b.blocked = true;
+       }
        __rcu_read_unlock();
        rcu_preempt_deferred_qs(current);
 }
@@ -979,33 +891,6 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 }
 static void rcu_preempt_deferred_qs(struct task_struct *t) { }
 
-/*
- * Because preemptible RCU does not exist, we never have to check for
- * tasks blocked within RCU read-side critical sections.
- */
-static void rcu_print_detail_task_stall(void)
-{
-}
-
-/*
- * Because preemptible RCU does not exist, we never have to check for
- * tasks blocked within RCU read-side critical sections.
- */
-static int rcu_print_task_stall(struct rcu_node *rnp)
-{
-       return 0;
-}
-
-/*
- * Because preemptible RCU does not exist, we never have to check for
- * tasks blocked within RCU read-side critical sections that are
- * blocking the current expedited grace period.
- */
-static int rcu_print_task_exp_stall(struct rcu_node *rnp)
-{
-       return 0;
-}
-
 /*
  * Because there is no preemptible RCU, there can be no readers blocked,
  * so there is no need to check for blocked tasks.  So check only for
@@ -1185,8 +1070,6 @@ static int rcu_boost_kthread(void *arg)
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
        __releases(rnp->lock)
 {
-       struct task_struct *t;
-
        raw_lockdep_assert_held_rcu_node(rnp);
        if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -1200,9 +1083,8 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
                if (rnp->exp_tasks == NULL)
                        rnp->boost_tasks = rnp->gp_tasks;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-               t = rnp->boost_kthread_task;
-               if (t)
-                       rcu_wake_cond(t, rnp->boost_kthread_status);
+               rcu_wake_cond(rnp->boost_kthread_task,
+                             rnp->boost_kthread_status);
        } else {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
@@ -1649,98 +1531,6 @@ static void rcu_cleanup_after_idle(void)
 
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
-#ifdef CONFIG_RCU_FAST_NO_HZ
-
-static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
-{
-       struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-
-       sprintf(cp, "last_accelerate: %04lx/%04lx, Nonlazy posted: %c%c%c",
-               rdp->last_accelerate & 0xffff, jiffies & 0xffff,
-               ".l"[rdp->all_lazy],
-               ".L"[!rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)],
-               ".D"[!rdp->tick_nohz_enabled_snap]);
-}
-
-#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
-
-static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
-{
-       *cp = '\0';
-}
-
-#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
-
-/* Initiate the stall-info list. */
-static void print_cpu_stall_info_begin(void)
-{
-       pr_cont("\n");
-}
-
-/*
- * Print out diagnostic information for the specified stalled CPU.
- *
- * If the specified CPU is aware of the current RCU grace period, then
- * print the number of scheduling clock interrupts the CPU has taken
- * during the time that it has been aware.  Otherwise, print the number
- * of RCU grace periods that this CPU is ignorant of, for example, "1"
- * if the CPU was aware of the previous grace period.
- *
- * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
- */
-static void print_cpu_stall_info(int cpu)
-{
-       unsigned long delta;
-       char fast_no_hz[72];
-       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-       char *ticks_title;
-       unsigned long ticks_value;
-
-       /*
-        * We could be printing a lot while holding a spinlock.  Avoid
-        * triggering hard lockup.
-        */
-       touch_nmi_watchdog();
-
-       ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
-       if (ticks_value) {
-               ticks_title = "GPs behind";
-       } else {
-               ticks_title = "ticks this GP";
-               ticks_value = rdp->ticks_this_gp;
-       }
-       print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
-       delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
-       pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
-              cpu,
-              "O."[!!cpu_online(cpu)],
-              "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
-              "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
-              !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
-                       rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
-                               "!."[!delta],
-              ticks_value, ticks_title,
-              rcu_dynticks_snap(rdp) & 0xfff,
-              rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
-              rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
-              READ_ONCE(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
-              fast_no_hz);
-}
-
-/* Terminate the stall-info list. */
-static void print_cpu_stall_info_end(void)
-{
-       pr_err("\t");
-}
-
-/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
-static void zero_cpu_stall_ticks(struct rcu_data *rdp)
-{
-       rdp->ticks_this_gp = 0;
-       rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
-       WRITE_ONCE(rdp->last_fqs_resched, jiffies);
-}
-
 #ifdef CONFIG_RCU_NOCB_CPU
 
 /*
@@ -1766,11 +1556,22 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp)
  */
 
 
-/* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */
+/*
+ * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
+ * The string after the "rcu_nocbs=" is either "all" for all CPUs, or a
+ * comma-separated list of CPUs and/or CPU ranges.  If an invalid list is
+ * given, a warning is emitted and all CPUs are offloaded.
+ */
 static int __init rcu_nocb_setup(char *str)
 {
        alloc_bootmem_cpumask_var(&rcu_nocb_mask);
-       cpulist_parse(str, rcu_nocb_mask);
+       if (!strcasecmp(str, "all"))
+               cpumask_setall(rcu_nocb_mask);
+       else if (cpulist_parse(str, rcu_nocb_mask)) {
+               pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
+               cpumask_setall(rcu_nocb_mask);
+       }
        return 1;
 }
 __setup("rcu_nocbs=", rcu_nocb_setup);
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
new file mode 100644 (file)
index 0000000..f65a73a
--- /dev/null
@@ -0,0 +1,709 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RCU CPU stall warnings for normal RCU grace periods
+ *
+ * Copyright IBM Corporation, 2019
+ *
+ * Author: Paul E. McKenney <paulmck@linux.ibm.com>
+ */
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Controlling CPU stall warnings, including delay calculation.
+
+/* panic() on RCU Stall sysctl. */
+int sysctl_panic_on_rcu_stall __read_mostly;
+
+#ifdef CONFIG_PROVE_RCU
+#define RCU_STALL_DELAY_DELTA         (5 * HZ)
+#else
+#define RCU_STALL_DELAY_DELTA         0
+#endif
+
+/* Limit-check stall timeouts specified at boottime and runtime. */
+int rcu_jiffies_till_stall_check(void)
+{
+       int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);
+
+       /*
+        * Limit check must be consistent with the Kconfig limits
+        * for CONFIG_RCU_CPU_STALL_TIMEOUT.
+        */
+       if (till_stall_check < 3) {
+               WRITE_ONCE(rcu_cpu_stall_timeout, 3);
+               till_stall_check = 3;
+       } else if (till_stall_check > 300) {
+               WRITE_ONCE(rcu_cpu_stall_timeout, 300);
+               till_stall_check = 300;
+       }
+       return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
+}
+EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
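Worked example for the clamp above, assuming mainline defaults: CONFIG_RCU_CPU_STALL_TIMEOUT defaults to 21, so with HZ = 1000 and CONFIG_PROVE_RCU=n the function returns 21 * 1000 + 0 = 21000 jiffies. A runtime value of 2 is clamped up to 3 (3000 jiffies) and 500 down to 300; with CONFIG_PROVE_RCU=y, RCU_STALL_DELAY_DELTA adds a further 5 * HZ = 5000 jiffies.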
+
+/* Don't do RCU CPU stall warnings during long sysrq printouts. */
+void rcu_sysrq_start(void)
+{
+       if (!rcu_cpu_stall_suppress)
+               rcu_cpu_stall_suppress = 2;
+}
+
+void rcu_sysrq_end(void)
+{
+       if (rcu_cpu_stall_suppress == 2)
+               rcu_cpu_stall_suppress = 0;
+}
+
+/* Don't print RCU CPU stall warnings during a kernel panic. */
+static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
+{
+       rcu_cpu_stall_suppress = 1;
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block rcu_panic_block = {
+       .notifier_call = rcu_panic,
+};
+
+static int __init check_cpu_stall_init(void)
+{
+       atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
+       return 0;
+}
+early_initcall(check_cpu_stall_init);
+
+/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
+static void panic_on_rcu_stall(void)
+{
+       if (sysctl_panic_on_rcu_stall)
+               panic("RCU Stall\n");
+}
+
+/**
+ * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
+ *
+ * Set the stall-warning timeout way off into the future, thus preventing
+ * any RCU CPU stall-warning messages from appearing in the current set of
+ * RCU grace periods.
+ *
+ * The caller must disable hard irqs.
+ */
+void rcu_cpu_stall_reset(void)
+{
+       WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
+}
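The jiffies + ULONG_MAX / 2 above parks the deadline as far in the future as the wrap-safe comparisons allow: ULONG_CMP_GE(a, b) is defined in kernel/rcu/rcu.h as (ULONG_MAX / 2 >= (a) - (b)), so with ->jiffies_stall pushed half the counter space ahead, jiffies - jiffies_stall stays above ULONG_MAX / 2 (and the stall test stays false) for roughly half a counter period. An 8-bit analogy: with a 0..255 counter, deadline = now + 127 means (now' - deadline) mod 256 lands in 129..255 until 127 ticks have elapsed, which the comparison reads as "deadline not yet reached".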
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Interaction with RCU grace periods
+
+/* Start of new grace period, so record stall time (and forcing times). */
+static void record_gp_stall_check_time(void)
+{
+       unsigned long j = jiffies;
+       unsigned long j1;
+
+       rcu_state.gp_start = j;
+       j1 = rcu_jiffies_till_stall_check();
+       /* Record ->gp_start before ->jiffies_stall. */
+       smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */
+       rcu_state.jiffies_resched = j + j1 / 2;
+       rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
+}
+
+/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
+static void zero_cpu_stall_ticks(struct rcu_data *rdp)
+{
+       rdp->ticks_this_gp = 0;
+       rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
+       WRITE_ONCE(rdp->last_fqs_resched, jiffies);
+}
+
+/*
+ * If too much time has passed in the current grace period, and if
+ * so configured, go kick the relevant kthreads.
+ */
+static void rcu_stall_kick_kthreads(void)
+{
+       unsigned long j;
+
+       if (!rcu_kick_kthreads)
+               return;
+       j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
+       if (time_after(jiffies, j) && rcu_state.gp_kthread &&
+           (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
+               WARN_ONCE(1, "Kicking %s grace-period kthread\n",
+                         rcu_state.name);
+               rcu_ftrace_dump(DUMP_ALL);
+               wake_up_process(rcu_state.gp_kthread);
+               WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
+       }
+}
+
+/*
+ * Handler for the irq_work request posted about halfway into the RCU CPU
+ * stall timeout, and used to detect excessive irq disabling.  Set state
+ * appropriately, but just complain if there is unexpected state on entry.
+ */
+static void rcu_iw_handler(struct irq_work *iwp)
+{
+       struct rcu_data *rdp;
+       struct rcu_node *rnp;
+
+       rdp = container_of(iwp, struct rcu_data, rcu_iw);
+       rnp = rdp->mynode;
+       raw_spin_lock_rcu_node(rnp);
+       if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
+               rdp->rcu_iw_gp_seq = rnp->gp_seq;
+               rdp->rcu_iw_pending = false;
+       }
+       raw_spin_unlock_rcu_node(rnp);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Printing RCU CPU stall warnings
+
+#ifdef CONFIG_PREEMPT
+
+/*
+ * Dump detailed information for all tasks blocking the current RCU
+ * grace period on the specified rcu_node structure.
+ */
+static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
+{
+       unsigned long flags;
+       struct task_struct *t;
+
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
+       if (!rcu_preempt_blocked_readers_cgp(rnp)) {
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               return;
+       }
+       t = list_entry(rnp->gp_tasks->prev,
+                      struct task_struct, rcu_node_entry);
+       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
+               /*
+                * We could be printing a lot while holding a spinlock.
+                * Avoid triggering hard lockup.
+                */
+               touch_nmi_watchdog();
+               sched_show_task(t);
+       }
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+}
+
+/*
+ * Scan the current list of tasks blocked within RCU read-side critical
+ * sections, printing out the tid of each.
+ */
+static int rcu_print_task_stall(struct rcu_node *rnp)
+{
+       struct task_struct *t;
+       int ndetected = 0;
+
+       if (!rcu_preempt_blocked_readers_cgp(rnp))
+               return 0;
+       pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
+              rnp->level, rnp->grplo, rnp->grphi);
+       t = list_entry(rnp->gp_tasks->prev,
+                      struct task_struct, rcu_node_entry);
+       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
+               pr_cont(" P%d", t->pid);
+               ndetected++;
+       }
+       pr_cont("\n");
+       return ndetected;
+}
+
+#else /* #ifdef CONFIG_PREEMPT */
+
+/*
+ * Because preemptible RCU does not exist, we never have to check for
+ * tasks blocked within RCU read-side critical sections.
+ */
+static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
+{
+}
+
+/*
+ * Because preemptible RCU does not exist, we never have to check for
+ * tasks blocked within RCU read-side critical sections.
+ */
+static int rcu_print_task_stall(struct rcu_node *rnp)
+{
+       return 0;
+}
+#endif /* #else #ifdef CONFIG_PREEMPT */
+
+/*
+ * Dump stacks of all tasks running on stalled CPUs.  First try using
+ * NMIs, but fall back to manual remote stack tracing on architectures
+ * that don't support NMI-based stack dumps.  The NMI-triggered stack
+ * traces are more accurate because they are printed by the target CPU.
+ */
+static void rcu_dump_cpu_stacks(void)
+{
+       int cpu;
+       unsigned long flags;
+       struct rcu_node *rnp;
+
+       rcu_for_each_leaf_node(rnp) {
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
+               for_each_leaf_node_possible_cpu(rnp, cpu)
+                       if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
+                               if (!trigger_single_cpu_backtrace(cpu))
+                                       dump_cpu_task(cpu);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+       }
+}
+
+#ifdef CONFIG_RCU_FAST_NO_HZ
+
+static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
+{
+       struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+
+       sprintf(cp, "last_accelerate: %04lx/%04lx, Nonlazy posted: %c%c%c",
+               rdp->last_accelerate & 0xffff, jiffies & 0xffff,
+               ".l"[rdp->all_lazy],
+               ".L"[!rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)],
+               ".D"[!!rdp->tick_nohz_enabled_snap]);
+}
+
+#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
+
+static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
+{
+       *cp = '\0';
+}
+
+#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
+
+/*
+ * Print out diagnostic information for the specified stalled CPU.
+ *
+ * If the specified CPU is aware of the current RCU grace period, then
+ * print the number of scheduling clock interrupts the CPU has taken
+ * during the time that it has been aware.  Otherwise, print the number
+ * of RCU grace periods that this CPU is ignorant of, for example, "1"
+ * if the CPU was aware of the previous grace period.
+ *
+ * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
+ */
+static void print_cpu_stall_info(int cpu)
+{
+       unsigned long delta;
+       char fast_no_hz[72];
+       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+       char *ticks_title;
+       unsigned long ticks_value;
+
+       /*
+        * We could be printing a lot while holding a spinlock.  Avoid
+        * triggering hard lockup.
+        */
+       touch_nmi_watchdog();
+
+       ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
+       if (ticks_value) {
+               ticks_title = "GPs behind";
+       } else {
+               ticks_title = "ticks this GP";
+               ticks_value = rdp->ticks_this_gp;
+       }
+       print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
+       delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
+       pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
+              cpu,
+              "O."[!!cpu_online(cpu)],
+              "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
+              "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
+              !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
+                       rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
+                               "!."[!delta],
+              ticks_value, ticks_title,
+              rcu_dynticks_snap(rdp) & 0xfff,
+              rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
+              rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
+              READ_ONCE(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
+              fast_no_hz);
+}
+
+/* Complain about starvation of grace-period kthread.  */
+static void rcu_check_gp_kthread_starvation(void)
+{
+       struct task_struct *gpk = rcu_state.gp_kthread;
+       unsigned long j;
+
+       j = jiffies - READ_ONCE(rcu_state.gp_activity);
+       if (j > 2 * HZ) {
+               pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
+                      rcu_state.name, j,
+                      (long)rcu_seq_current(&rcu_state.gp_seq),
+                      READ_ONCE(rcu_state.gp_flags),
+                      gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
+                      gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
+               if (gpk) {
+                       pr_err("RCU grace-period kthread stack dump:\n");
+                       sched_show_task(gpk);
+                       wake_up_process(gpk);
+               }
+       }
+}
+
+static void print_other_cpu_stall(unsigned long gp_seq)
+{
+       int cpu;
+       unsigned long flags;
+       unsigned long gpa;
+       unsigned long j;
+       int ndetected = 0;
+       struct rcu_node *rnp;
+       long totqlen = 0;
+
+       /* Kick and suppress, if so configured. */
+       rcu_stall_kick_kthreads();
+       if (rcu_cpu_stall_suppress)
+               return;
+
+       /*
+        * OK, time to rat on our buddy...
+        * See Documentation/RCU/stallwarn.txt for info on how to debug
+        * RCU CPU stall warnings.
+        */
+       pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
+       rcu_for_each_leaf_node(rnp) {
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
+               ndetected += rcu_print_task_stall(rnp);
+               if (rnp->qsmask != 0) {
+                       for_each_leaf_node_possible_cpu(rnp, cpu)
+                               if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
+                                       print_cpu_stall_info(cpu);
+                                       ndetected++;
+                               }
+               }
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+       }
+
+       for_each_possible_cpu(cpu)
+               totqlen += rcu_get_n_cbs_cpu(cpu);
+       pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
+              smp_processor_id(), (long)(jiffies - rcu_state.gp_start),
+              (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
+       if (ndetected) {
+               rcu_dump_cpu_stacks();
+
+               /* Complain about tasks blocking the grace period. */
+               rcu_for_each_leaf_node(rnp)
+                       rcu_print_detail_task_stall_rnp(rnp);
+       } else {
+               if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
+                       pr_err("INFO: Stall ended before state dump start\n");
+               } else {
+                       j = jiffies;
+                       gpa = READ_ONCE(rcu_state.gp_activity);
+                       pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
+                              rcu_state.name, j - gpa, j, gpa,
+                              READ_ONCE(jiffies_till_next_fqs),
+                              rcu_get_root()->qsmask);
+                       /* In this case, the current CPU might be at fault. */
+                       sched_show_task(current);
+               }
+       }
+       /* Rewrite if needed in case of slow consoles. */
+       if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
+               WRITE_ONCE(rcu_state.jiffies_stall,
+                          jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
+
+       rcu_check_gp_kthread_starvation();
+
+       panic_on_rcu_stall();
+
+       rcu_force_quiescent_state();  /* Kick them all. */
+}
+
+static void print_cpu_stall(void)
+{
+       int cpu;
+       unsigned long flags;
+       struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+       struct rcu_node *rnp = rcu_get_root();
+       long totqlen = 0;
+
+       /* Kick and suppress, if so configured. */
+       rcu_stall_kick_kthreads();
+       if (rcu_cpu_stall_suppress)
+               return;
+
+       /*
+        * OK, time to rat on ourselves...
+        * See Documentation/RCU/stallwarn.txt for info on how to debug
+        * RCU CPU stall warnings.
+        */
+       pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
+       raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
+       print_cpu_stall_info(smp_processor_id());
+       raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
+       for_each_possible_cpu(cpu)
+               totqlen += rcu_get_n_cbs_cpu(cpu);
+       pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
+               jiffies - rcu_state.gp_start,
+               (long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
+
+       rcu_check_gp_kthread_starvation();
+
+       rcu_dump_cpu_stacks();
+
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
+       /* Rewrite if needed in case of slow consoles. */
+       if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
+               WRITE_ONCE(rcu_state.jiffies_stall,
+                          jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+
+       panic_on_rcu_stall();
+
+       /*
+        * Attempt to revive the RCU machinery by forcing a context switch.
+        *
+        * A context switch would normally allow the RCU state machine to make
+        * progress and it could be we're stuck in kernel space without context
+        * switches for an entirely unreasonable amount of time.
+        */
+       set_tsk_need_resched(current);
+       set_preempt_need_resched();
+}
+
+static void check_cpu_stall(struct rcu_data *rdp)
+{
+       unsigned long gs1;
+       unsigned long gs2;
+       unsigned long gps;
+       unsigned long j;
+       unsigned long jn;
+       unsigned long js;
+       struct rcu_node *rnp;
+
+       if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
+           !rcu_gp_in_progress())
+               return;
+       rcu_stall_kick_kthreads();
+       j = jiffies;
+
+       /*
+        * Lots of memory barriers to reject false positives.
+        *
+        * The idea is to pick up rcu_state.gp_seq, then
+        * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
+        * another copy of rcu_state.gp_seq.  These values are updated in
+        * the opposite order with memory barriers (or equivalent) during
+        * grace-period initialization and cleanup.  Now, a false positive
+        * can occur if we get a new value of rcu_state.gp_start and an old
+        * value of rcu_state.jiffies_stall.  But given the memory barriers,
+        * the only way that this can happen is if one grace period ends
+        * and another starts between these two fetches.  This is detected
+        * by comparing the second fetch of rcu_state.gp_seq with the
+        * previous fetch from rcu_state.gp_seq.
+        *
+        * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
+        * and rcu_state.gp_start suffice to forestall false positives.
+        */
+       gs1 = READ_ONCE(rcu_state.gp_seq);
+       smp_rmb(); /* Pick up ->gp_seq first... */
+       js = READ_ONCE(rcu_state.jiffies_stall);
+       smp_rmb(); /* ...then ->jiffies_stall before the rest... */
+       gps = READ_ONCE(rcu_state.gp_start);
+       smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
+       gs2 = READ_ONCE(rcu_state.gp_seq);
+       if (gs1 != gs2 ||
+           ULONG_CMP_LT(j, js) ||
+           ULONG_CMP_GE(gps, js))
+               return; /* No stall or GP completed since entering function. */
+       rnp = rdp->mynode;
+       jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
+       if (rcu_gp_in_progress() &&
+           (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
+           cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
+
+               /* We haven't checked in, so go dump stack. */
+               print_cpu_stall();
+
+       } else if (rcu_gp_in_progress() &&
+                  ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
+                  cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
+
+               /* They had a few time units to dump stack, so complain. */
+               print_other_cpu_stall(gs2);
+       }
+}
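
A note on the two idioms this check leans on: the barrier-ordered double fetch of rcu_state.gp_seq described in the comment above, and the wraparound-safe time comparisons done with ULONG_CMP_LT()/ULONG_CMP_GE() instead of plain relational operators. The standalone sketch below shows why the modular comparison is needed once a counter such as jiffies wraps; the macro bodies are modeled on the kernel's definitions in include/linux/rcupdate.h, but the program itself is purely illustrative.

```c
#include <limits.h>
#include <stdio.h>

/* Wraparound-safe comparisons, modeled on include/linux/rcupdate.h. */
#define ULONG_CMP_GE(a, b)  (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)  (ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long js = ULONG_MAX - 5; /* deadline set just before the wrap */
	unsigned long j  = 10;            /* the counter has since wrapped */

	/* A plain comparison is fooled by the wrap... */
	printf("plain j >= js:       %d\n", j >= js);             /* 0: wrong */
	/* ...while the modular comparison still sees j as "after" js. */
	printf("ULONG_CMP_GE(j, js): %d\n", ULONG_CMP_GE(j, js)); /* 1 */
	printf("ULONG_CMP_LT(js, j): %d\n", ULONG_CMP_LT(js, j)); /* 1 */
	return 0;
}
```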
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// RCU forward-progress mechanisms, including callback invocation.
+
+
+/*
+ * Show the state of the grace-period kthreads.
+ */
+void show_rcu_gp_kthreads(void)
+{
+       int cpu;
+       unsigned long j;
+       unsigned long ja;
+       unsigned long jr;
+       unsigned long jw;
+       struct rcu_data *rdp;
+       struct rcu_node *rnp;
+
+       j = jiffies;
+       ja = j - READ_ONCE(rcu_state.gp_activity);
+       jr = j - READ_ONCE(rcu_state.gp_req_activity);
+       jw = j - READ_ONCE(rcu_state.gp_wake_time);
+       pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
+               rcu_state.name, gp_state_getname(rcu_state.gp_state),
+               rcu_state.gp_state,
+               rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL,
+               ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq),
+               (long)READ_ONCE(rcu_state.gp_seq),
+               (long)READ_ONCE(rcu_get_root()->gp_seq_needed),
+               READ_ONCE(rcu_state.gp_flags));
+       rcu_for_each_node_breadth_first(rnp) {
+               if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
+                       continue;
+               pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
+                       rnp->grplo, rnp->grphi, (long)rnp->gp_seq,
+                       (long)rnp->gp_seq_needed);
+               if (!rcu_is_leaf_node(rnp))
+                       continue;
+               for_each_leaf_node_possible_cpu(rnp, cpu) {
+                       rdp = per_cpu_ptr(&rcu_data, cpu);
+                       if (rdp->gpwrap ||
+                           ULONG_CMP_GE(rcu_state.gp_seq,
+                                        rdp->gp_seq_needed))
+                               continue;
+                       pr_info("\tcpu %d ->gp_seq_needed %ld\n",
+                               cpu, (long)rdp->gp_seq_needed);
+               }
+       }
+       /* sched_show_task(rcu_state.gp_kthread); */
+}
+EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
+
+/*
+ * This function checks for grace-period requests that fail to motivate
+ * RCU to come out of its idle mode.
+ */
+static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
+                                    const unsigned long gpssdelay)
+{
+       unsigned long flags;
+       unsigned long j;
+       struct rcu_node *rnp_root = rcu_get_root();
+       static atomic_t warned = ATOMIC_INIT(0);
+
+       if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
+           ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
+               return;
+       j = jiffies; /* Expensive access, and in common case don't get here. */
+       if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
+           time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
+           atomic_read(&warned))
+               return;
+
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
+       j = jiffies;
+       if (rcu_gp_in_progress() ||
+           ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
+           time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
+           time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
+           atomic_read(&warned)) {
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               return;
+       }
+       /* Hold onto the leaf lock to make others see warned==1. */
+
+       if (rnp_root != rnp)
+               raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
+       j = jiffies;
+       if (rcu_gp_in_progress() ||
+           ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
+           time_before(j, rcu_state.gp_req_activity + gpssdelay) ||
+           time_before(j, rcu_state.gp_activity + gpssdelay) ||
+           atomic_xchg(&warned, 1)) {
+               raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               return;
+       }
+       WARN_ON(1);
+       if (rnp_root != rnp)
+               raw_spin_unlock_rcu_node(rnp_root);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+       show_rcu_gp_kthreads();
+}
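
rcu_check_gp_start_stall() above is a check/lock/recheck pattern: a cheap racy test runs first, the same conditions are re-evaluated under the leaf lock and then the root lock, and finally atomic_xchg(&warned, 1) elects exactly one CPU to emit the warning. Below is a minimal userspace sketch of that election step, using C11 atomics rather than the kernel's atomic_t; the function name is illustrative, not kernel API.

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_int warned;

/* Returns 1 for exactly one caller, no matter how many race in:
 * atomic_exchange() returns the previous value, so only the caller
 * that observes the 0 -> 1 transition wins the right to warn. */
static int claim_warning(void)
{
	return atomic_exchange(&warned, 1) == 0;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		if (claim_warning())
			printf("caller %d emits the one-time warning\n", i);
	return 0;
}
```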
+
+/*
+ * Do a forward-progress check for rcutorture.  This is normally invoked
+ * due to an OOM event.  The argument "j" gives the time period during
+ * which rcutorture would like progress to have been made.
+ */
+void rcu_fwd_progress_check(unsigned long j)
+{
+       unsigned long cbs;
+       int cpu;
+       unsigned long max_cbs = 0;
+       int max_cpu = -1;
+       struct rcu_data *rdp;
+
+       if (rcu_gp_in_progress()) {
+               pr_info("%s: GP age %lu jiffies\n",
+                       __func__, jiffies - rcu_state.gp_start);
+               show_rcu_gp_kthreads();
+       } else {
+               pr_info("%s: Last GP end %lu jiffies ago\n",
+                       __func__, jiffies - rcu_state.gp_end);
+               preempt_disable();
+               rdp = this_cpu_ptr(&rcu_data);
+               rcu_check_gp_start_stall(rdp->mynode, rdp, j);
+               preempt_enable();
+       }
+       for_each_possible_cpu(cpu) {
+               cbs = rcu_get_n_cbs_cpu(cpu);
+               if (!cbs)
+                       continue;
+               if (max_cpu < 0)
+                       pr_info("%s: callbacks", __func__);
+               pr_cont(" %d: %lu", cpu, cbs);
+               if (cbs <= max_cbs)
+                       continue;
+               max_cbs = cbs;
+               max_cpu = cpu;
+       }
+       if (max_cpu >= 0)
+               pr_cont("\n");
+}
+EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);
+
+/* Commandeer a sysrq key to dump RCU's tree. */
+static bool sysrq_rcu;
+module_param(sysrq_rcu, bool, 0444);
+
+/* Dump grace-period-request information due to commandeered sysrq. */
+static void sysrq_show_rcu(int key)
+{
+       show_rcu_gp_kthreads();
+}
+
+static struct sysrq_key_op sysrq_rcudump_op = {
+       .handler = sysrq_show_rcu,
+       .help_msg = "show-rcu(y)",
+       .action_msg = "Show RCU tree",
+       .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+
+static int __init rcu_sysrq_init(void)
+{
+       if (sysrq_rcu)
+               return register_sysrq_key('y', &sysrq_rcudump_op);
+       return 0;
+}
+early_initcall(rcu_sysrq_init);
index cbaa976c594518653bc3ef985361e812187e1ff2..c3bf44ba42e5420117601047fc7d1bdd7d511bad 100644 (file)
@@ -424,68 +424,11 @@ EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
 #endif
 
 #ifdef CONFIG_RCU_STALL_COMMON
-
-#ifdef CONFIG_PROVE_RCU
-#define RCU_STALL_DELAY_DELTA         (5 * HZ)
-#else
-#define RCU_STALL_DELAY_DELTA         0
-#endif
-
 int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
 EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
-static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
-
 module_param(rcu_cpu_stall_suppress, int, 0644);
+int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
 module_param(rcu_cpu_stall_timeout, int, 0644);
-
-int rcu_jiffies_till_stall_check(void)
-{
-       int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);
-
-       /*
-        * Limit check must be consistent with the Kconfig limits
-        * for CONFIG_RCU_CPU_STALL_TIMEOUT.
-        */
-       if (till_stall_check < 3) {
-               WRITE_ONCE(rcu_cpu_stall_timeout, 3);
-               till_stall_check = 3;
-       } else if (till_stall_check > 300) {
-               WRITE_ONCE(rcu_cpu_stall_timeout, 300);
-               till_stall_check = 300;
-       }
-       return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
-}
-EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
-
-void rcu_sysrq_start(void)
-{
-       if (!rcu_cpu_stall_suppress)
-               rcu_cpu_stall_suppress = 2;
-}
-
-void rcu_sysrq_end(void)
-{
-       if (rcu_cpu_stall_suppress == 2)
-               rcu_cpu_stall_suppress = 0;
-}
-
-static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
-{
-       rcu_cpu_stall_suppress = 1;
-       return NOTIFY_DONE;
-}
-
-static struct notifier_block rcu_panic_block = {
-       .notifier_call = rcu_panic,
-};
-
-static int __init check_cpu_stall_init(void)
-{
-       atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
-       return 0;
-}
-early_initcall(check_cpu_stall_init);
-
 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */
 
 #ifdef CONFIG_TASKS_RCU
index 92190f62ebc53438b7da8fcd2845c7590f002e03..8c15f846e8ef22790d7865cb7d36388d54aadd67 100644 (file)
@@ -520,21 +520,20 @@ EXPORT_SYMBOL_GPL(page_is_ram);
 int region_intersects(resource_size_t start, size_t size, unsigned long flags,
                      unsigned long desc)
 {
-       resource_size_t end = start + size - 1;
+       struct resource res;
        int type = 0; int other = 0;
        struct resource *p;
 
+       res.start = start;
+       res.end = start + size - 1;
+
        read_lock(&resource_lock);
        for (p = iomem_resource.child; p ; p = p->sibling) {
                bool is_type = (((p->flags & flags) == flags) &&
                                ((desc == IORES_DESC_NONE) ||
                                 (desc == p->desc)));
 
-               if (start >= p->start && start <= p->end)
-                       is_type ? type++ : other++;
-               if (end >= p->start && end <= p->end)
-                       is_type ? type++ : other++;
-               if (p->start >= start && p->end <= end)
+               if (resource_overlaps(p, &res))
                        is_type ? type++ : other++;
        }
        read_unlock(&resource_lock);
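
The region_intersects() hunk above collapses three containment tests into a single overlap test. For inclusive [start, end] ranges like struct resource, two ranges overlap exactly when each one starts no later than the other ends; this standalone sketch mirrors the shape of resource_overlaps() and is illustrative only.

```c
#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, end; }; /* inclusive, like struct resource */

/* Two closed ranges overlap iff each starts no later than the other ends. */
static bool range_overlaps(const struct range *a, const struct range *b)
{
	return a->start <= b->end && a->end >= b->start;
}

int main(void)
{
	struct range res = { .start = 0x1000, .end = 0x1fff };
	struct range p1  = { .start = 0x1800, .end = 0x27ff }; /* straddles the end */
	struct range p2  = { .start = 0x3000, .end = 0x3fff }; /* disjoint */

	printf("p1 overlaps: %d\n", range_overlaps(&p1, &res)); /* 1 */
	printf("p2 overlaps: %d\n", range_overlaps(&p2, &res)); /* 0 */
	return 0;
}
```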
index 25e9a7b60eba43e14db86283cb07e8a172c9de4f..9424ee90589effcc841ead2bd0f5ddb20e646a17 100644 (file)
@@ -254,8 +254,7 @@ static int rseq_ip_fixup(struct pt_regs *regs)
  * - signal delivery,
  * and return to user-space.
  *
- * This is how we can ensure that the entire rseq critical section,
- * consisting of both the C part and the assembly instruction sequence,
+ * This is how we can ensure that the entire rseq critical section
  * will issue the commit instruction only if executed atomically with
  * respect to other threads scheduled on the same CPU, and with respect
  * to signal handlers.
@@ -314,7 +313,7 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
                /* Unregister rseq for current thread. */
                if (current->rseq != rseq || !current->rseq)
                        return -EINVAL;
-               if (current->rseq_len != rseq_len)
+               if (rseq_len != sizeof(*rseq))
                        return -EINVAL;
                if (current->rseq_sig != sig)
                        return -EPERM;
@@ -322,7 +321,6 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
                if (ret)
                        return ret;
                current->rseq = NULL;
-               current->rseq_len = 0;
                current->rseq_sig = 0;
                return 0;
        }
@@ -336,7 +334,7 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
                 * the provided address differs from the prior
                 * one.
                 */
-               if (current->rseq != rseq || current->rseq_len != rseq_len)
+               if (current->rseq != rseq || rseq_len != sizeof(*rseq))
                        return -EINVAL;
                if (current->rseq_sig != sig)
                        return -EPERM;
@@ -354,7 +352,6 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
        if (!access_ok(rseq, rseq_len))
                return -EFAULT;
        current->rseq = rseq;
-       current->rseq_len = rseq_len;
        current->rseq_sig = sig;
        /*
         * If rseq was previously inactive, and has just been
index ead464a0f2e5dfc71a0c0df16a9823f3677f847a..102dfcf0a29a8539106c15c0150878c457953fbd 100644 (file)
@@ -792,10 +792,14 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
                rq->nr_uninterruptible--;
 
        enqueue_task(rq, p, flags);
+
+       p->on_rq = TASK_ON_RQ_QUEUED;
 }
 
 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
+       p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
+
        if (task_contributes_to_load(p))
                rq->nr_uninterruptible++;
 
@@ -920,7 +924,7 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
 }
 
 /*
- * Per-CPU kthreads are allowed to run on !actie && online CPUs, see
+ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
  * __set_cpus_allowed_ptr() and select_fallback_rq().
  */
 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
@@ -1151,7 +1155,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
                /* Need help from migration thread: drop lock and wait. */
                task_rq_unlock(rq, p, &rf);
                stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
-               tlb_migrate_finish(p->mm);
                return 0;
        } else if (task_on_rq_queued(p)) {
                /*
@@ -1237,11 +1240,9 @@ static void __migrate_swap_task(struct task_struct *p, int cpu)
                rq_pin_lock(src_rq, &srf);
                rq_pin_lock(dst_rq, &drf);
 
-               p->on_rq = TASK_ON_RQ_MIGRATING;
                deactivate_task(src_rq, p, 0);
                set_task_cpu(p, cpu);
                activate_task(dst_rq, p, 0);
-               p->on_rq = TASK_ON_RQ_QUEUED;
                check_preempt_curr(dst_rq, p, 0);
 
                rq_unpin_lock(dst_rq, &drf);
@@ -1681,16 +1682,6 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
                __schedstat_inc(p->se.statistics.nr_wakeups_sync);
 }
 
-static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
-{
-       activate_task(rq, p, en_flags);
-       p->on_rq = TASK_ON_RQ_QUEUED;
-
-       /* If a worker is waking up, notify the workqueue: */
-       if (p->flags & PF_WQ_WORKER)
-               wq_worker_waking_up(p, cpu_of(rq));
-}
-
 /*
  * Mark the task runnable and perform wakeup-preemption.
  */
@@ -1742,7 +1733,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
                en_flags |= ENQUEUE_MIGRATED;
 #endif
 
-       ttwu_activate(rq, p, en_flags);
+       activate_task(rq, p, en_flags);
        ttwu_do_wakeup(rq, p, wake_flags, rf);
 }
 
@@ -2106,56 +2097,6 @@ out:
        return success;
 }
 
-/**
- * try_to_wake_up_local - try to wake up a local task with rq lock held
- * @p: the thread to be awakened
- * @rf: request-queue flags for pinning
- *
- * Put @p on the run-queue if it's not already there. The caller must
- * ensure that this_rq() is locked, @p is bound to this_rq() and not
- * the current task.
- */
-static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf)
-{
-       struct rq *rq = task_rq(p);
-
-       if (WARN_ON_ONCE(rq != this_rq()) ||
-           WARN_ON_ONCE(p == current))
-               return;
-
-       lockdep_assert_held(&rq->lock);
-
-       if (!raw_spin_trylock(&p->pi_lock)) {
-               /*
-                * This is OK, because current is on_cpu, which avoids it being
-                * picked for load-balance and preemption/IRQs are still
-                * disabled avoiding further scheduler activity on it and we've
-                * not yet picked a replacement task.
-                */
-               rq_unlock(rq, rf);
-               raw_spin_lock(&p->pi_lock);
-               rq_relock(rq, rf);
-       }
-
-       if (!(p->state & TASK_NORMAL))
-               goto out;
-
-       trace_sched_waking(p);
-
-       if (!task_on_rq_queued(p)) {
-               if (p->in_iowait) {
-                       delayacct_blkio_end(p);
-                       atomic_dec(&rq->nr_iowait);
-               }
-               ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK);
-       }
-
-       ttwu_do_wakeup(rq, p, 0, rf);
-       ttwu_stat(p, smp_processor_id(), 0);
-out:
-       raw_spin_unlock(&p->pi_lock);
-}
-
 /**
  * wake_up_process - Wake up a specific process
  * @p: The process to be woken up.
@@ -2467,7 +2408,6 @@ void wake_up_new_task(struct task_struct *p)
        post_init_entity_util_avg(p);
 
        activate_task(rq, p, ENQUEUE_NOCLOCK);
-       p->on_rq = TASK_ON_RQ_QUEUED;
        trace_sched_wakeup_new(p);
        check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -3466,25 +3406,11 @@ static void __sched notrace __schedule(bool preempt)
                        prev->state = TASK_RUNNING;
                } else {
                        deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
-                       prev->on_rq = 0;
 
                        if (prev->in_iowait) {
                                atomic_inc(&rq->nr_iowait);
                                delayacct_blkio_start();
                        }
-
-                       /*
-                        * If a worker went to sleep, notify and ask workqueue
-                        * whether it wants to wake up a task to maintain
-                        * concurrency.
-                        */
-                       if (prev->flags & PF_WQ_WORKER) {
-                               struct task_struct *to_wakeup;
-
-                               to_wakeup = wq_worker_sleeping(prev);
-                               if (to_wakeup)
-                                       try_to_wake_up_local(to_wakeup, &rf);
-                       }
                }
                switch_count = &prev->nvcsw;
        }
@@ -3544,6 +3470,20 @@ static inline void sched_submit_work(struct task_struct *tsk)
 {
        if (!tsk->state || tsk_is_pi_blocked(tsk))
                return;
+
+       /*
+        * If a worker went to sleep, notify and ask workqueue whether
+        * it wants to wake up a task to maintain concurrency.
+        * As this function is called inside the schedule() context,
+        * we disable preemption to avoid it calling schedule() again
+        * in the possible wakeup of a kworker.
+        */
+       if (tsk->flags & PF_WQ_WORKER) {
+               preempt_disable();
+               wq_worker_sleeping(tsk);
+               preempt_enable_no_resched();
+       }
+
        /*
         * If we are going to sleep and we have plugged IO queued,
         * make sure to submit it to avoid deadlocks.
@@ -3552,6 +3492,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
                blk_schedule_flush_plug(tsk);
 }
 
+static void sched_update_worker(struct task_struct *tsk)
+{
+       if (tsk->flags & PF_WQ_WORKER)
+               wq_worker_running(tsk);
+}
+
 asmlinkage __visible void __sched schedule(void)
 {
        struct task_struct *tsk = current;
@@ -3562,6 +3508,7 @@ asmlinkage __visible void __sched schedule(void)
                __schedule(false);
                sched_preempt_enable_no_resched();
        } while (need_resched());
+       sched_update_worker(tsk);
 }
 EXPORT_SYMBOL(schedule);
 
@@ -5918,7 +5865,7 @@ void __init sched_init_smp(void)
 
 static int __init migration_init(void)
 {
-       sched_rq_cpu_starting(smp_processor_id());
+       sched_cpu_starting(smp_processor_id());
        return 0;
 }
 early_initcall(migration_init);
@@ -6559,6 +6506,8 @@ static void cpu_cgroup_attach(struct cgroup_taskset *tset)
 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
                                struct cftype *cftype, u64 shareval)
 {
+       if (shareval > scale_load_down(ULONG_MAX))
+               shareval = MAX_SHARES;
        return sched_group_set_shares(css_tg(css), scale_load(shareval));
 }
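
The cpu_shares_write_u64() hunk clamps the userspace-supplied value before scale_load() shifts it left, since shifting anything larger than scale_load_down(ULONG_MAX) would silently drop high bits. A hedged sketch of the clamp-before-shift idea follows; the shift of 10 matches SCHED_FIXEDPOINT_SHIFT on 64-bit kernels, but MAXVAL is a stand-in ceiling, not the kernel's MAX_SHARES.

```c
#include <limits.h>
#include <stdio.h>

#define SHIFT   10            /* like SCHED_FIXEDPOINT_SHIFT on 64-bit */
#define MAXVAL  (1UL << 18)   /* illustrative ceiling, not MAX_SHARES */

static unsigned long scale(unsigned long w)      { return w << SHIFT; }
static unsigned long scale_down(unsigned long w) { return w >> SHIFT; }

int main(void)
{
	unsigned long shareval = ULONG_MAX; /* hostile userspace input */

	/* Without the clamp, scale(shareval) would silently lose the top bits. */
	if (shareval > scale_down(ULONG_MAX))
		shareval = MAXVAL;

	printf("scaled shares: %#lx\n", scale(shareval));
	return 0;
}
```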
 
@@ -6574,7 +6523,7 @@ static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
 static DEFINE_MUTEX(cfs_constraints_mutex);
 
 const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
-const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
+static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
 
 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
 
@@ -6654,20 +6603,22 @@ out_unlock:
        return ret;
 }
 
-int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
+static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
 {
        u64 quota, period;
 
        period = ktime_to_ns(tg->cfs_bandwidth.period);
        if (cfs_quota_us < 0)
                quota = RUNTIME_INF;
-       else
+       else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
                quota = (u64)cfs_quota_us * NSEC_PER_USEC;
+       else
+               return -EINVAL;
 
        return tg_set_cfs_bandwidth(tg, period, quota);
 }
 
-long tg_get_cfs_quota(struct task_group *tg)
+static long tg_get_cfs_quota(struct task_group *tg)
 {
        u64 quota_us;
 
@@ -6680,17 +6631,20 @@ long tg_get_cfs_quota(struct task_group *tg)
        return quota_us;
 }
 
-int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
+static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
 {
        u64 quota, period;
 
+       if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
+               return -EINVAL;
+
        period = (u64)cfs_period_us * NSEC_PER_USEC;
        quota = tg->cfs_bandwidth.quota;
 
        return tg_set_cfs_bandwidth(tg, period, quota);
 }
 
-long tg_get_cfs_period(struct task_group *tg)
+static long tg_get_cfs_period(struct task_group *tg)
 {
        u64 cfs_period_us;
 
@@ -6998,7 +6952,7 @@ static int __maybe_unused cpu_period_quota_parse(char *buf,
 {
        char tok[21];   /* U64_MAX */
 
-       if (!sscanf(buf, "%s %llu", tok, periodp))
+       if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
                return -EINVAL;
 
        *periodp *= NSEC_PER_USEC;
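
Two details of the cpu_period_quota_parse() fix are easy to miss: the %20s field width keeps sscanf() from writing past the 21-byte tok buffer, and comparing the return value with `< 1` (rather than negating it) also catches EOF, which sscanf() reports as -1 and which the old `!sscanf(...)` test would have treated as success. A small userspace illustration:

```c
#include <stdio.h>

int main(void)
{
	char tok[21];	/* room for "max" or a 20-digit u64 plus NUL */
	unsigned long long period;
	int n;

	/* "%20s" stops after 20 bytes, so a long token cannot overflow tok. */
	n = sscanf("max 100000", "%20s %llu", tok, &period);
	if (n < 1)
		return 1;	/* covers both 0 (no match) and EOF (-1) */

	printf("tok=%s, fields matched=%d\n", tok, n);
	return 0;
}
```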
index 835671f0f91707d04bbd07da2dbc4bee822071ef..b5dcd1d83c7fada652b79576738d9542f0638d67 100644 (file)
@@ -7,7 +7,7 @@
  */
 #include "sched.h"
 
-DEFINE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
+DEFINE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
 
 /**
  * cpufreq_add_update_util_hook - Populate the CPU's update_util_data pointer.
index 2efe629425be2491918d6331f093e76b3d806948..3638d2377e3c69f71be87ea4a638ddf0765e601a 100644 (file)
@@ -48,10 +48,10 @@ struct sugov_cpu {
 
        bool                    iowait_boost_pending;
        unsigned int            iowait_boost;
-       unsigned int            iowait_boost_max;
        u64                     last_update;
 
        unsigned long           bw_dl;
+       unsigned long           min;
        unsigned long           max;
 
        /* The field below is for single-CPU policies only: */
@@ -303,8 +303,7 @@ static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
        if (delta_ns <= TICK_NSEC)
                return false;
 
-       sg_cpu->iowait_boost = set_iowait_boost
-               ? sg_cpu->sg_policy->policy->min : 0;
+       sg_cpu->iowait_boost = set_iowait_boost ? sg_cpu->min : 0;
        sg_cpu->iowait_boost_pending = set_iowait_boost;
 
        return true;
@@ -344,14 +343,13 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
 
        /* Double the boost at each request */
        if (sg_cpu->iowait_boost) {
-               sg_cpu->iowait_boost <<= 1;
-               if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
-                       sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+               sg_cpu->iowait_boost =
+                       min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
                return;
        }
 
        /* First wakeup after IO: start with minimum boost */
-       sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
+       sg_cpu->iowait_boost = sg_cpu->min;
 }
 
 /**
@@ -373,47 +371,38 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
  * This mechanism is designed to boost highly frequent IO-waiting tasks, while
  * being more conservative on tasks which do sporadic IO operations.
  */
-static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
-                              unsigned long *util, unsigned long *max)
+static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
+                                       unsigned long util, unsigned long max)
 {
-       unsigned int boost_util, boost_max;
+       unsigned long boost;
 
        /* No boost currently required */
        if (!sg_cpu->iowait_boost)
-               return;
+               return util;
 
        /* Reset boost if the CPU appears to have been idle enough */
        if (sugov_iowait_reset(sg_cpu, time, false))
-               return;
+               return util;
 
-       /*
-        * An IO waiting task has just woken up:
-        * allow to further double the boost value
-        */
-       if (sg_cpu->iowait_boost_pending) {
-               sg_cpu->iowait_boost_pending = false;
-       } else {
+       if (!sg_cpu->iowait_boost_pending) {
                /*
-                * Otherwise: reduce the boost value and disable it when we
-                * reach the minimum.
+                * No boost pending; reduce the boost value.
                 */
                sg_cpu->iowait_boost >>= 1;
-               if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
+               if (sg_cpu->iowait_boost < sg_cpu->min) {
                        sg_cpu->iowait_boost = 0;
-                       return;
+                       return util;
                }
        }
 
+       sg_cpu->iowait_boost_pending = false;
+
        /*
-        * Apply the current boost value: a CPU is boosted only if its current
-        * utilization is smaller then the current IO boost level.
+        * @util is already in capacity scale; convert iowait_boost
+        * into the same scale so we can compare.
         */
-       boost_util = sg_cpu->iowait_boost;
-       boost_max = sg_cpu->iowait_boost_max;
-       if (*util * boost_max < *max * boost_util) {
-               *util = boost_util;
-               *max = boost_max;
-       }
+       boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
+       return max(boost, util);
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
@@ -460,7 +449,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 
        util = sugov_get_util(sg_cpu);
        max = sg_cpu->max;
-       sugov_iowait_apply(sg_cpu, time, &util, &max);
+       util = sugov_iowait_apply(sg_cpu, time, util, max);
        next_f = get_next_freq(sg_policy, util, max);
        /*
         * Do not reduce the frequency if the CPU has not been idle
@@ -500,7 +489,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 
                j_util = sugov_get_util(j_sg_cpu);
                j_max = j_sg_cpu->max;
-               sugov_iowait_apply(j_sg_cpu, time, &j_util, &j_max);
+               j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);
 
                if (j_util * max > j_max * util) {
                        util = j_util;
@@ -782,6 +771,7 @@ out:
        return 0;
 
 fail:
+       kobject_put(&tunables->attr_set.kobj);
        policy->governor_data = NULL;
        sugov_tunables_free(tunables);
 
@@ -837,7 +827,9 @@ static int sugov_start(struct cpufreq_policy *policy)
                memset(sg_cpu, 0, sizeof(*sg_cpu));
                sg_cpu->cpu                     = cpu;
                sg_cpu->sg_policy               = sg_policy;
-               sg_cpu->iowait_boost_max        = policy->cpuinfo.max_freq;
+               sg_cpu->min                     =
+                       (SCHED_CAPACITY_SCALE * policy->cpuinfo.min_freq) /
+                       policy->cpuinfo.max_freq;
        }
 
        for_each_cpu(cpu, policy->cpus) {
index 6a73e41a20160bf760e09ca050a82657ccf95347..43901fa3f26932d334f34f4b474a3cb55821513a 100644 (file)
@@ -252,7 +252,6 @@ static void task_non_contending(struct task_struct *p)
        if (dl_entity_is_special(dl_se))
                return;
 
-       WARN_ON(hrtimer_active(&dl_se->inactive_timer));
        WARN_ON(dl_se->dl_non_contending);
 
        zerolag_time = dl_se->deadline -
@@ -269,7 +268,7 @@ static void task_non_contending(struct task_struct *p)
         * If the "0-lag time" already passed, decrease the active
         * utilization now, instead of starting a timer
         */
-       if (zerolag_time < 0) {
+       if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
                if (dl_task(p))
                        sub_running_bw(dl_se, dl_rq);
                if (!dl_task(p) || p->state == TASK_DEAD) {
index 8039d62ae36e6fd9165a7ec1751200db32c5bf15..678bfb9bd87f7c40cbc7533d5659c4fdc8add6ef 100644 (file)
@@ -702,7 +702,7 @@ do {                                                                        \
 
 static const char *sched_tunable_scaling_names[] = {
        "none",
-       "logaritmic",
+       "logarithmic",
        "linear"
 };
 
index ea74d43924b25f7ae98788532d2070152b6cb5fc..f35930f5e528a8e1ca8e5f8ed5a6556c86a54701 100644 (file)
@@ -2007,6 +2007,10 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
        if (p->last_task_numa_placement) {
                delta = runtime - p->last_sum_exec_runtime;
                *period = now - p->last_task_numa_placement;
+
+               /* Avoid time going backwards, prevent potential divide error: */
+               if (unlikely((s64)*period < 0))
+                       *period = 0;
        } else {
                delta = p->se.avg.load_sum;
                *period = LOAD_AVG_MAX;
@@ -2593,7 +2597,7 @@ out:
 /*
  * Drive the periodic memory faults..
  */
-void task_tick_numa(struct rq *rq, struct task_struct *curr)
+static void task_tick_numa(struct rq *rq, struct task_struct *curr)
 {
        struct callback_head *work = &curr->numa_work;
        u64 period, now;
@@ -3567,7 +3571,7 @@ static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
  * Synchronize entity load avg of dequeued entity without locking
  * the previous rq.
  */
-void sync_entity_load_avg(struct sched_entity *se)
+static void sync_entity_load_avg(struct sched_entity *se)
 {
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
        u64 last_update_time;
@@ -3580,7 +3584,7 @@ void sync_entity_load_avg(struct sched_entity *se)
  * Task first catches up with cfs_rq, and then subtract
  * itself from the cfs_rq (task must be off the queue now).
  */
-void remove_entity_load_avg(struct sched_entity *se)
+static void remove_entity_load_avg(struct sched_entity *se)
 {
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
        unsigned long flags;
@@ -4885,6 +4889,8 @@ static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
+extern const u64 max_cfs_quota_period;
+
 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 {
        struct cfs_bandwidth *cfs_b =
@@ -4892,6 +4898,7 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
        unsigned long flags;
        int overrun;
        int idle = 0;
+       int count = 0;
 
        raw_spin_lock_irqsave(&cfs_b->lock, flags);
        for (;;) {
@@ -4899,6 +4906,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
                if (!overrun)
                        break;
 
+               if (++count > 3) {
+                       u64 new, old = ktime_to_ns(cfs_b->period);
+
+                       new = (old * 147) / 128; /* ~115% */
+                       new = min(new, max_cfs_quota_period);
+
+                       cfs_b->period = ns_to_ktime(new);
+
+                       /* since max is 1s, this is limited to 1e9^2, which fits in u64 */
+                       cfs_b->quota *= new;
+                       cfs_b->quota = div64_u64(cfs_b->quota, old);
+
+                       pr_warn_ratelimited(
+       "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
+                               smp_processor_id(),
+                               div_u64(new, NSEC_PER_USEC),
+                               div_u64(cfs_b->quota, NSEC_PER_USEC));
+
+                       /* reset count so we don't come right back in here */
+                       count = 0;
+               }
+
                idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
        }
        if (idle)
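
The new escape hatch in sched_cfs_period_timer() deserves a quick sanity check: growing the period by 147/128 (roughly 15% per step, the "~115%" in the comment) while scaling the quota by the same ratio leaves the quota/period bandwidth fraction unchanged. A standalone check of that arithmetic with made-up starting values:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t period = 100000; /* ns, illustrative */
	uint64_t quota  = 50000;  /* ns: 50% bandwidth */

	for (int step = 0; step < 3; step++) {
		uint64_t grown = period * 147 / 128; /* ~114.8% of old period */

		/* Scale quota by the same ratio, keeping quota/period constant. */
		quota  = quota * grown / period;
		period = grown;
		printf("period=%8llu quota=%8llu ratio=%.3f\n",
		       (unsigned long long)period, (unsigned long long)quota,
		       (double)quota / period);
	}
	return 0;
}
```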
@@ -5116,7 +5145,6 @@ static inline void hrtick_update(struct rq *rq)
 
 #ifdef CONFIG_SMP
 static inline unsigned long cpu_util(int cpu);
-static unsigned long capacity_of(int cpu);
 
 static inline bool cpu_overutilized(int cpu)
 {
@@ -7492,7 +7520,6 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
 {
        lockdep_assert_held(&env->src_rq->lock);
 
-       p->on_rq = TASK_ON_RQ_MIGRATING;
        deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
        set_task_cpu(p, env->dst_cpu);
 }
@@ -7628,7 +7655,6 @@ static void attach_task(struct rq *rq, struct task_struct *p)
 
        BUG_ON(task_rq(p) != rq);
        activate_task(rq, p, ENQUEUE_NOCLOCK);
-       p->on_rq = TASK_ON_RQ_QUEUED;
        check_preempt_curr(rq, p, 0);
 }
 
@@ -7784,10 +7810,10 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
        if (cfs_rq->last_h_load_update == now)
                return;
 
-       cfs_rq->h_load_next = NULL;
+       WRITE_ONCE(cfs_rq->h_load_next, NULL);
        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
-               cfs_rq->h_load_next = se;
+               WRITE_ONCE(cfs_rq->h_load_next, se);
                if (cfs_rq->last_h_load_update == now)
                        break;
        }
@@ -7797,7 +7823,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
                cfs_rq->last_h_load_update = now;
        }
 
-       while ((se = cfs_rq->h_load_next) != NULL) {
+       while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
                load = cfs_rq->h_load;
                load = div64_ul(load * se->avg.load_avg,
                        cfs_rq_load_avg(cfs_rq) + 1);
@@ -8059,6 +8085,18 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
                                (rq->cpu_capacity_orig * 100));
 }
 
+/*
+ * Check whether a rq has a misfit task and if it looks like we can actually
+ * help that task: we can migrate the task to a CPU of higher capacity, or
+ * the task's current CPU is heavily pressured.
+ */
+static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
+{
+       return rq->misfit_task_load &&
+               (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
+                check_cpu_capacity(rq, sd));
+}
+
 /*
  * Group imbalance indicates (and tries to solve) the problem where balancing
  * groups is inadequate due to ->cpus_allowed constraints.
@@ -9510,22 +9548,26 @@ static inline int on_null_domain(struct rq *rq)
  * - When one of the busy CPUs notice that there may be an idle rebalancing
  *   needed, they will kick the idle load balancer, which then does idle
  *   load balancing for all the idle CPUs.
+ * - HK_FLAG_MISC CPUs are used for this task, because HK_FLAG_SCHED is not
+ *   set anywhere yet.
  */
 
 static inline int find_new_ilb(void)
 {
-       int ilb = cpumask_first(nohz.idle_cpus_mask);
+       int ilb;
 
-       if (ilb < nr_cpu_ids && idle_cpu(ilb))
-               return ilb;
+       for_each_cpu_and(ilb, nohz.idle_cpus_mask,
+                             housekeeping_cpumask(HK_FLAG_MISC)) {
+               if (idle_cpu(ilb))
+                       return ilb;
+       }
 
        return nr_cpu_ids;
 }
 
 /*
- * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
- * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
- * CPU (if there is one).
+ * Kick a CPU to do the nohz balancing, if it is time for it. We pick any
+ * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one).
  */
 static void kick_ilb(unsigned int flags)
 {
@@ -9586,35 +9628,21 @@ static void nohz_balancer_kick(struct rq *rq)
        if (time_before(now, nohz.next_balance))
                goto out;
 
-       if (rq->nr_running >= 2 || rq->misfit_task_load) {
+       if (rq->nr_running >= 2) {
                flags = NOHZ_KICK_MASK;
                goto out;
        }
 
        rcu_read_lock();
-       sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
-       if (sds) {
-               /*
-                * If there is an imbalance between LLC domains (IOW we could
-                * increase the overall cache use), we need some less-loaded LLC
-                * domain to pull some load. Likewise, we may need to spread
-                * load within the current LLC domain (e.g. packed SMT cores but
-                * other CPUs are idle). We can't really know from here how busy
-                * the others are - so just get a nohz balance going if it looks
-                * like this LLC domain has tasks we could move.
-                */
-               nr_busy = atomic_read(&sds->nr_busy_cpus);
-               if (nr_busy > 1) {
-                       flags = NOHZ_KICK_MASK;
-                       goto unlock;
-               }
-
-       }
 
        sd = rcu_dereference(rq->sd);
        if (sd) {
-               if ((rq->cfs.h_nr_running >= 1) &&
-                   check_cpu_capacity(rq, sd)) {
+               /*
+                * If there's a CFS task and the current CPU has reduced
+                * capacity, kick the ILB to see if there's a better CPU to run
+                * on.
+                */
+               if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
                        flags = NOHZ_KICK_MASK;
                        goto unlock;
                }
@@ -9622,6 +9650,11 @@ static void nohz_balancer_kick(struct rq *rq)
 
        sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
        if (sd) {
+               /*
+                * When ASYM_PACKING, see if there's a more preferred CPU
+                * currently idle; in which case, kick the ILB to move tasks
+                * around.
+                */
                for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
                        if (sched_asym_prefer(i, cpu)) {
                                flags = NOHZ_KICK_MASK;
@@ -9629,6 +9662,45 @@ static void nohz_balancer_kick(struct rq *rq)
                        }
                }
        }
+
+       sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
+       if (sd) {
+               /*
+                * When ASYM_CPUCAPACITY, see if there's a higher capacity CPU
+                * to run the misfit task on.
+                */
+               if (check_misfit_status(rq, sd)) {
+                       flags = NOHZ_KICK_MASK;
+                       goto unlock;
+               }
+
+               /*
+                * For asymmetric systems, we do not want to nicely balance
+                * cache use; instead we want to embrace asymmetry and only
+                * ensure tasks have enough CPU capacity.
+                *
+                * Skip the LLC logic because it's not relevant in that case.
+                */
+               goto unlock;
+       }
+
+       sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
+       if (sds) {
+               /*
+                * If there is an imbalance between LLC domains (IOW we could
+                * increase the overall cache use), we need some less-loaded LLC
+                * domain to pull some load. Likewise, we may need to spread
+                * load within the current LLC domain (e.g. packed SMT cores but
+                * other CPUs are idle). We can't really know from here how busy
+                * the others are - so just get a nohz balance going if it looks
+                * like this LLC domain has tasks we could move.
+                */
+               nr_busy = atomic_read(&sds->nr_busy_cpus);
+               if (nr_busy > 1) {
+                       flags = NOHZ_KICK_MASK;
+                       goto unlock;
+               }
+       }
 unlock:
        rcu_read_unlock();
 out:
index b02d148e767273c3a2641b4c66b9f0d24ea6e9ee..687302051a270d6300acf70822d95c016e1b8a33 100644 (file)
@@ -65,6 +65,7 @@ void __init housekeeping_init(void)
 static int __init housekeeping_setup(char *str, enum hk_flags flags)
 {
        cpumask_var_t non_housekeeping_mask;
+       cpumask_var_t tmp;
        int err;
 
        alloc_bootmem_cpumask_var(&non_housekeeping_mask);
@@ -75,16 +76,23 @@ static int __init housekeeping_setup(char *str, enum hk_flags flags)
                return 0;
        }
 
+       alloc_bootmem_cpumask_var(&tmp);
        if (!housekeeping_flags) {
                alloc_bootmem_cpumask_var(&housekeeping_mask);
                cpumask_andnot(housekeeping_mask,
                               cpu_possible_mask, non_housekeeping_mask);
-               if (cpumask_empty(housekeeping_mask))
+
+               cpumask_andnot(tmp, cpu_present_mask, non_housekeeping_mask);
+               if (cpumask_empty(tmp)) {
+                       pr_warn("Housekeeping: must include one present CPU, "
+                               "using boot CPU:%d\n", smp_processor_id());
                        __cpumask_set_cpu(smp_processor_id(), housekeeping_mask);
+                       __cpumask_clear_cpu(smp_processor_id(), non_housekeeping_mask);
+               }
        } else {
-               cpumask_var_t tmp;
-
-               alloc_bootmem_cpumask_var(&tmp);
+               cpumask_andnot(tmp, cpu_present_mask, non_housekeeping_mask);
+               if (cpumask_empty(tmp))
+                       __cpumask_clear_cpu(smp_processor_id(), non_housekeeping_mask);
                cpumask_andnot(tmp, cpu_possible_mask, non_housekeeping_mask);
                if (!cpumask_equal(tmp, housekeeping_mask)) {
                        pr_warn("Housekeeping: nohz_full= must match isolcpus=\n");
@@ -92,8 +100,8 @@ static int __init housekeeping_setup(char *str, enum hk_flags flags)
                        free_bootmem_cpumask_var(non_housekeeping_mask);
                        return 0;
                }
-               free_bootmem_cpumask_var(tmp);
        }
+       free_bootmem_cpumask_var(tmp);
 
        if ((flags & HK_FLAG_TICK) && !(housekeeping_flags & HK_FLAG_TICK)) {
                if (IS_ENABLED(CONFIG_NO_HZ_FULL)) {
index 90fa23d36565d1c76137ab06bedc90bb2a8342a7..1e6b909dca367c58d332a0de3d229932c6bee83f 100644 (file)
@@ -2555,6 +2555,8 @@ int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
        rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
        if (rt_runtime_us < 0)
                rt_runtime = RUNTIME_INF;
+       else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
+               return -EINVAL;
 
        return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
 }
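
This guard, repeated in sched_group_set_rt_period() just below and in the cfs quota/period setters earlier, rejects any input above U64_MAX / NSEC_PER_USEC before the microseconds-to-nanoseconds multiply, because u64 multiplication wraps silently. A tiny standalone demonstration of the idiom; the helper name is illustrative.

```c
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

/* Returns 0 and stores the product, or -1 if us * NSEC_PER_USEC would wrap. */
static int us_to_ns_checked(uint64_t us, uint64_t *ns)
{
	if (us > UINT64_MAX / NSEC_PER_USEC)
		return -1;
	*ns = us * NSEC_PER_USEC;
	return 0;
}

int main(void)
{
	uint64_t ns;

	printf("1000000 us -> %s\n",
	       us_to_ns_checked(1000000, &ns) ? "rejected" : "ok");
	printf("UINT64_MAX us -> %s\n",
	       us_to_ns_checked(UINT64_MAX, &ns) ? "rejected" : "ok");
	return 0;
}
```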
@@ -2575,6 +2577,9 @@ int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
 {
        u64 rt_runtime, rt_period;
 
+       if (rt_period_us > U64_MAX / NSEC_PER_USEC)
+               return -EINVAL;
+
        rt_period = rt_period_us * NSEC_PER_USEC;
        rt_runtime = tg->rt_bandwidth.rt_runtime;
 
index efa686eeff2691b2d4a697f97704b9b3e0cca2d3..b52ed1ada0be8bea55bf983028fc896d72b6b473 100644 (file)
@@ -780,7 +780,7 @@ struct root_domain {
         * NULL-terminated list of performance domains intersecting with the
         * CPUs of the rd. Protected by RCU.
         */
-       struct perf_domain      *pd;
+       struct perf_domain __rcu *pd;
 };
 
 extern struct root_domain def_root_domain;
@@ -869,8 +869,8 @@ struct rq {
        atomic_t                nr_iowait;
 
 #ifdef CONFIG_SMP
-       struct root_domain      *rd;
-       struct sched_domain     *sd;
+       struct root_domain              *rd;
+       struct sched_domain __rcu       *sd;
 
        unsigned long           cpu_capacity;
        unsigned long           cpu_capacity_orig;
@@ -1324,13 +1324,13 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
        return sd;
 }
 
-DECLARE_PER_CPU(struct sched_domain *, sd_llc);
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
 DECLARE_PER_CPU(int, sd_llc_size);
 DECLARE_PER_CPU(int, sd_llc_id);
-DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
-DECLARE_PER_CPU(struct sched_domain *, sd_numa);
-DECLARE_PER_CPU(struct sched_domain *, sd_asym_packing);
-DECLARE_PER_CPU(struct sched_domain *, sd_asym_cpucapacity);
+DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
 extern struct static_key_false sched_asym_cpucapacity;
 
 struct sched_group_capacity {
@@ -2185,7 +2185,7 @@ static inline u64 irq_time_read(int cpu)
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 #ifdef CONFIG_CPU_FREQ
-DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
+DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
 
 /**
  * cpufreq_update_util - Take a note about CPU utilization changes.
index ab7f371a3a17992221c3ed61f72b96ffd0a17dfe..f53f89df837d84786635a302209e45f4cc72da85 100644 (file)
@@ -615,13 +615,13 @@ static void destroy_sched_domains(struct sched_domain *sd)
  * the cpumask of the domain), this allows us to quickly tell if
  * two CPUs are in the same cache domain, see cpus_share_cache().
  */
-DEFINE_PER_CPU(struct sched_domain *, sd_llc);
+DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
 DEFINE_PER_CPU(int, sd_llc_size);
 DEFINE_PER_CPU(int, sd_llc_id);
-DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
-DEFINE_PER_CPU(struct sched_domain *, sd_numa);
-DEFINE_PER_CPU(struct sched_domain *, sd_asym_packing);
-DEFINE_PER_CPU(struct sched_domain *, sd_asym_cpucapacity);
+DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
+DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
+DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
+DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
 DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
 
 static void update_top_cache_domain(int cpu)
@@ -1059,6 +1059,7 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd)
        struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
        struct sched_domain *child = sd->child;
        struct sched_group *sg;
+       bool already_visited;
 
        if (child)
                cpu = cpumask_first(sched_domain_span(child));
@@ -1066,9 +1067,14 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd)
        sg = *per_cpu_ptr(sdd->sg, cpu);
        sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
 
-       /* For claim_allocations: */
-       atomic_inc(&sg->ref);
-       atomic_inc(&sg->sgc->ref);
+       /* Increase refcounts for claim_allocations: */
+       already_visited = atomic_inc_return(&sg->ref) > 1;
+       /* sgc visits should follow a similar trend as sg */
+       WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1));
+
+       /* If we have already visited that group, it's already initialized. */
+       if (already_visited)
+               return sg;
 
        if (child) {
                cpumask_copy(sched_group_span(sg), sched_domain_span(child));
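
The get_group() change above turns the reference bump itself into the "have we been here before?" test: atomic_inc_return() reports the new count, so only the caller that takes the count from 0 to 1 performs the one-time initialization. A hedged C11 sketch of the idiom; the struct and function names are illustrative, not kernel API.

```c
#include <stdatomic.h>
#include <stdio.h>

struct group {
	atomic_int ref;
	int initialized;
};

/* The first caller sees the count go 0 -> 1 and does the setup;
 * later callers just take a reference, as in get_group(). */
static void get_group_ref(struct group *g)
{
	/* atomic_fetch_add() returns the old value: nonzero means visited. */
	int already_visited = atomic_fetch_add(&g->ref, 1) > 0;

	if (already_visited)
		return;
	g->initialized = 1;
	printf("initializing group\n");
}

int main(void)
{
	struct group g = { 0 };

	get_group_ref(&g); /* prints once */
	get_group_ref(&g); /* refcount only */
	printf("ref=%d initialized=%d\n", atomic_load(&g.ref), g.initialized);
	return 0;
}
```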
@@ -1087,8 +1093,8 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd)
 
 /*
  * build_sched_groups will build a circular linked list of the groups
- * covered by the given span, and will set each group's ->cpumask correctly,
- * and ->cpu_capacity to 0.
+ * covered by the given span, will set each group's ->cpumask correctly,
+ * and will initialize their ->sgc.
  *
  * Assumes the sched_domain tree is fully constructed
  */
@@ -2075,9 +2081,8 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
 }
 
 /*
- * Set up scheduler domains and groups. Callers must hold the hotplug lock.
- * For now this just excludes isolated CPUs, but could be used to
- * exclude other special cases in the future.
+ * Set up scheduler domains and groups.  For now this just excludes isolated
+ * CPUs, but could be used to exclude other special cases in the future.
  */
 int sched_init_domains(const struct cpumask *cpu_map)
 {
index 54a0347ca8128f09cdbbcc83e2e8f8eea633a7ab..3582eeb59893313577c7827ea1b55854ba4ed67d 100644 (file)
@@ -149,7 +149,7 @@ static void populate_seccomp_data(struct seccomp_data *sd)
 
        sd->nr = syscall_get_nr(task, regs);
        sd->arch = syscall_get_arch();
-       syscall_get_arguments(task, regs, 0, 6, args);
+       syscall_get_arguments(task, regs, args);
        sd->args[0] = args[0];
        sd->args[1] = args[1];
        sd->args[2] = args[2];
@@ -502,7 +502,10 @@ out:
  *
  * Caller must be holding current->sighand->siglock lock.
  *
- * Returns 0 on success, -ve on error.
+ * Returns 0 on success, -ve on error, or
+ *   - in TSYNC mode: the pid of a thread which was either not in the correct
+ *     seccomp mode or did not have an ancestral seccomp filter
+ *   - in NEW_LISTENER mode: the fd of the new listener
  */
 static long seccomp_attach_filter(unsigned int flags,
                                  struct seccomp_filter *filter)
@@ -1258,6 +1261,16 @@ static long seccomp_set_mode_filter(unsigned int flags,
        if (flags & ~SECCOMP_FILTER_FLAG_MASK)
                return -EINVAL;
 
+       /*
+        * In the successful case, NEW_LISTENER returns the new listener fd.
+        * But in the failure case, TSYNC returns the thread that died. If you
+        * combine these two flags, there's no way to tell whether something
+        * succeeded or failed. So, let's disallow this combination.
+        */
+       if ((flags & SECCOMP_FILTER_FLAG_TSYNC) &&
+           (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER))
+               return -EINVAL;
+
        /* Prepare the new filter before holding any locks. */
        prepared = seccomp_prepare_user_filter(filter);
        if (IS_ERR(prepared))
@@ -1304,7 +1317,7 @@ out:
                mutex_unlock(&current->signal->cred_guard_mutex);
 out_put_fd:
        if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
-               if (ret < 0) {
+               if (ret) {
                        listener_f->private_data = NULL;
                        fput(listener_f);
                        put_unused_fd(listener);
index b7953934aa994e7993254aa6b04438815ed37f1f..227ba170298e5b457c9b405c5376c466fe26850b 100644 (file)
@@ -3581,7 +3581,7 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
        if (flags)
                return -EINVAL;
 
-       f = fdget_raw(pidfd);
+       f = fdget(pidfd);
        if (!f.file)
                return -EBADF;
 
@@ -3605,16 +3605,11 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
                if (unlikely(sig != kinfo.si_signo))
                        goto err;
 
+               /* Only allow sending arbitrary signals to yourself. */
+               ret = -EPERM;
                if ((task_pid(current) != pid) &&
-                   (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) {
-                       /* Only allow sending arbitrary signals to yourself. */
-                       ret = -EPERM;
-                       if (kinfo.si_code != SI_USER)
-                               goto err;
-
-                       /* Turn this into a regular kill signal. */
-                       prepare_kill_siginfo(sig, &kinfo);
-               }
+                   (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
+                       goto err;
        } else {
                prepare_kill_siginfo(sig, &kinfo);
        }
index f8edee9c792de527cfb968664cc1ae91ea062d1f..27bafc1e271ee7e444e889cc9bff4dc4d6f40d14 100644 (file)
@@ -5,41 +5,56 @@
  *
  *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  */
+#include <linux/sched/task_stack.h>
+#include <linux/sched/debug.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/kallsyms.h>
 #include <linux/stacktrace.h>
 
-void print_stack_trace(struct stack_trace *trace, int spaces)
+/**
+ * stack_trace_print - Print the entries in the stack trace
+ * @entries:   Pointer to storage array
+ * @nr_entries:        Number of entries in the storage array
+ * @spaces:    Number of leading spaces to print
+ */
+void stack_trace_print(unsigned long *entries, unsigned int nr_entries,
+                      int spaces)
 {
-       int i;
+       unsigned int i;
 
-       if (WARN_ON(!trace->entries))
+       if (WARN_ON(!entries))
                return;
 
-       for (i = 0; i < trace->nr_entries; i++)
-               printk("%*c%pS\n", 1 + spaces, ' ', (void *)trace->entries[i]);
+       for (i = 0; i < nr_entries; i++)
+               printk("%*c%pS\n", 1 + spaces, ' ', (void *)entries[i]);
 }
-EXPORT_SYMBOL_GPL(print_stack_trace);
+EXPORT_SYMBOL_GPL(stack_trace_print);
 
-int snprint_stack_trace(char *buf, size_t size,
-                       struct stack_trace *trace, int spaces)
+/**
+ * stack_trace_snprint - Print the entries in the stack trace into a buffer
+ * @buf:       Pointer to the print buffer
+ * @size:      Size of the print buffer
+ * @entries:   Pointer to storage array
+ * @nr_entries:        Number of entries in the storage array
+ * @spaces:    Number of leading spaces to print
+ *
+ * Return: Number of bytes printed.
+ */
+int stack_trace_snprint(char *buf, size_t size, unsigned long *entries,
+                       unsigned int nr_entries, int spaces)
 {
-       int i;
-       int generated;
-       int total = 0;
+       unsigned int generated, i, total = 0;
 
-       if (WARN_ON(!trace->entries))
+       if (WARN_ON(!entries))
                return 0;
 
-       for (i = 0; i < trace->nr_entries; i++) {
+       for (i = 0; i < nr_entries && size; i++) {
                generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
-                                    (void *)trace->entries[i]);
+                                    (void *)entries[i]);
 
                total += generated;
-
-               /* Assume that generated isn't a negative number */
                if (generated >= size) {
                        buf += size;
                        size = 0;
@@ -51,7 +66,176 @@ int snprint_stack_trace(char *buf, size_t size,
 
        return total;
 }
-EXPORT_SYMBOL_GPL(snprint_stack_trace);
+EXPORT_SYMBOL_GPL(stack_trace_snprint);
+
+#ifdef CONFIG_ARCH_STACKWALK
+
+struct stacktrace_cookie {
+       unsigned long   *store;
+       unsigned int    size;
+       unsigned int    skip;
+       unsigned int    len;
+};
+
+static bool stack_trace_consume_entry(void *cookie, unsigned long addr,
+                                     bool reliable)
+{
+       struct stacktrace_cookie *c = cookie;
+
+       if (c->len >= c->size)
+               return false;
+
+       if (c->skip > 0) {
+               c->skip--;
+               return true;
+       }
+       c->store[c->len++] = addr;
+       return c->len < c->size;
+}
+
+static bool stack_trace_consume_entry_nosched(void *cookie, unsigned long addr,
+                                             bool reliable)
+{
+       if (in_sched_functions(addr))
+               return true;
+       return stack_trace_consume_entry(cookie, addr, reliable);
+}
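The consume callback is the whole architecture interface: arch_stack_walk()
hands the callback one address per frame and stops as soon as it returns
false. A hypothetical extra consumer, sketched for an arch with
CONFIG_ARCH_STACKWALK; print_entry and print_current_stack are made-up names,
not part of this series:

	#include <linux/printk.h>
	#include <linux/sched.h>
	#include <linux/stacktrace.h>

	struct print_cookie {
		unsigned int printed;
		unsigned int max_print;
	};

	static bool print_entry(void *cookie, unsigned long addr, bool reliable)
	{
		struct print_cookie *c = cookie;

		pr_info(" %pS%s\n", (void *)addr, reliable ? "" : " (unreliable)");
		/* Returning false stops the walk early. */
		return ++c->printed < c->max_print;
	}

	static void print_current_stack(void)
	{
		struct print_cookie c = { .printed = 0, .max_print = 16 };

		arch_stack_walk(print_entry, &c, current, NULL);
	}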
+
+/**
+ * stack_trace_save - Save a stack trace into a storage array
+ * @store:     Pointer to storage array
+ * @size:      Size of the storage array
+ * @skipnr:    Number of entries to skip at the start of the stack trace
+ *
+ * Return: Number of trace entries stored.
+ */
+unsigned int stack_trace_save(unsigned long *store, unsigned int size,
+                             unsigned int skipnr)
+{
+       stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
+       struct stacktrace_cookie c = {
+               .store  = store,
+               .size   = size,
+               .skip   = skipnr + 1,
+       };
+
+       arch_stack_walk(consume_entry, &c, current, NULL);
+       return c.len;
+}
+EXPORT_SYMBOL_GPL(stack_trace_save);
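For callers, the old struct stack_trace boilerplate collapses to two calls.
A sketch of a hypothetical user (dump_here is a made-up name) using only the
API introduced above:

	static void dump_here(void)
	{
		unsigned long entries[32];
		unsigned int nr;

		/* skipnr == 1 drops dump_here() itself from the trace. */
		nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
		stack_trace_print(entries, nr, 0);
	}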
+
+/**
+ * stack_trace_save_tsk - Save a task stack trace into a storage array
+ * @tsk:       The task to examine
+ * @store:     Pointer to storage array
+ * @size:      Size of the storage array
+ * @skipnr:    Number of entries to skip at the start of the stack trace
+ *
+ * Return: Number of trace entries stored.
+ */
+unsigned int stack_trace_save_tsk(struct task_struct *tsk, unsigned long *store,
+                                 unsigned int size, unsigned int skipnr)
+{
+       stack_trace_consume_fn consume_entry = stack_trace_consume_entry_nosched;
+       struct stacktrace_cookie c = {
+               .store  = store,
+               .size   = size,
+               .skip   = skipnr + 1,
+       };
+
+       if (!try_get_task_stack(tsk))
+               return 0;
+
+       arch_stack_walk(consume_entry, &c, tsk, NULL);
+       put_task_stack(tsk);
+       return c.len;
+}
+
+/**
+ * stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array
+ * @regs:      Pointer to pt_regs to examine
+ * @store:     Pointer to storage array
+ * @size:      Size of the storage array
+ * @skipnr:    Number of entries to skip at the start of the stack trace
+ *
+ * Return: Number of trace entries stored.
+ */
+unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
+                                  unsigned int size, unsigned int skipnr)
+{
+       stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
+       struct stacktrace_cookie c = {
+               .store  = store,
+               .size   = size,
+               .skip   = skipnr,
+       };
+
+       arch_stack_walk(consume_entry, &c, current, regs);
+       return c.len;
+}
+
+#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
+/**
+ * stack_trace_save_tsk_reliable - Save task stack with verification
+ * @tsk:       Pointer to the task to examine
+ * @store:     Pointer to storage array
+ * @size:      Size of the storage array
+ *
+ * Return:     An error if it detects any unreliable features of the
+ *             stack. Otherwise it guarantees that the stack trace is
+ *             reliable and returns the number of entries stored.
+ *
+ * If the task is not 'current', the caller *must* ensure the task is inactive.
+ */
+int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
+                                 unsigned int size)
+{
+       stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
+       struct stacktrace_cookie c = {
+               .store  = store,
+               .size   = size,
+       };
+       int ret;
+
+       /*
+        * If the task doesn't have a stack (e.g., a zombie), the stack is
+        * "reliably" empty.
+        */
+       if (!try_get_task_stack(tsk))
+               return 0;
+
+       ret = arch_stack_walk_reliable(consume_entry, &c, tsk);
+       put_task_stack(tsk);
+       return ret;
+}
+#endif
+
+#ifdef CONFIG_USER_STACKTRACE_SUPPORT
+/**
+ * stack_trace_save_user - Save a user space stack trace into a storage array
+ * @store:     Pointer to storage array
+ * @size:      Size of the storage array
+ *
+ * Return: Number of trace entries stored.
+ */
+unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
+{
+       stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
+       struct stacktrace_cookie c = {
+               .store  = store,
+               .size   = size,
+       };
+
+       /* Trace user stack if not a kernel thread */
+       if (!current->mm)
+               return 0;
+
+       arch_stack_walk_user(consume_entry, &c, task_pt_regs(current));
+       return c.len;
+}
+#endif
+
+#else /* CONFIG_ARCH_STACKWALK */
 
 /*
  * Architectures that do not implement save_stack_trace_*()
@@ -77,3 +261,118 @@ save_stack_trace_tsk_reliable(struct task_struct *tsk,
        WARN_ONCE(1, KERN_INFO "save_stack_tsk_reliable() not implemented yet.\n");
        return -ENOSYS;
 }
+
+/**
+ * stack_trace_save - Save a stack trace into a storage array
+ * @store:     Pointer to storage array
+ * @size:      Size of the storage array
+ * @skipnr:    Number of entries to skip at the start of the stack trace
+ *
+ * Return: Number of trace entries stored
+ */
+unsigned int stack_trace_save(unsigned long *store, unsigned int size,
+                             unsigned int skipnr)
+{
+       struct stack_trace trace = {
+               .entries        = store,
+               .max_entries    = size,
+               .skip           = skipnr + 1,
+       };
+
+       save_stack_trace(&trace);
+       return trace.nr_entries;
+}
+EXPORT_SYMBOL_GPL(stack_trace_save);
+
+/**
+ * stack_trace_save_tsk - Save a task stack trace into a storage array
+ * @task:      The task to examine
+ * @store:     Pointer to storage array
+ * @size:      Size of the storage array
+ * @skipnr:    Number of entries to skip at the start of the stack trace
+ *
+ * Return: Number of trace entries stored
+ */
+unsigned int stack_trace_save_tsk(struct task_struct *task,
+                                 unsigned long *store, unsigned int size,
+                                 unsigned int skipnr)
+{
+       struct stack_trace trace = {
+               .entries        = store,
+               .max_entries    = size,
+               .skip           = skipnr + 1,
+       };
+
+       save_stack_trace_tsk(task, &trace);
+       return trace.nr_entries;
+}
+
+/**
+ * stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array
+ * @regs:      Pointer to pt_regs to examine
+ * @store:     Pointer to storage array
+ * @size:      Size of the storage array
+ * @skipnr:    Number of entries to skip at the start of the stack trace
+ *
+ * Return: Number of trace entries stored
+ */
+unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
+                                  unsigned int size, unsigned int skipnr)
+{
+       struct stack_trace trace = {
+               .entries        = store,
+               .max_entries    = size,
+               .skip           = skipnr,
+       };
+
+       save_stack_trace_regs(regs, &trace);
+       return trace.nr_entries;
+}
+
+#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
+/**
+ * stack_trace_save_tsk_reliable - Save task stack with verification
+ * @tsk:       Pointer to the task to examine
+ * @store:     Pointer to storage array
+ * @size:      Size of the storage array
+ *
+ * Return:     An error if it detects any unreliable features of the
+ *             stack. Otherwise it guarantees that the stack trace is
+ *             reliable and returns the number of entries stored.
+ *
+ * If the task is not 'current', the caller *must* ensure the task is inactive.
+ */
+int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
+                                 unsigned int size)
+{
+       struct stack_trace trace = {
+               .entries        = store,
+               .max_entries    = size,
+       };
+       int ret = save_stack_trace_tsk_reliable(tsk, &trace);
+
+       return ret ? ret : trace.nr_entries;
+}
+#endif
+
+#ifdef CONFIG_USER_STACKTRACE_SUPPORT
+/**
+ * stack_trace_save_user - Save a user space stack trace into a storage array
+ * @store:     Pointer to storage array
+ * @size:      Size of the storage array
+ *
+ * Return: Number of trace entries stored
+ */
+unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
+{
+       struct stack_trace trace = {
+               .entries        = store,
+               .max_entries    = size,
+       };
+
+       save_stack_trace_user(&trace);
+       return trace.nr_entries;
+}
+#endif /* CONFIG_USER_STACKTRACE_SUPPORT */
+
+#endif /* !CONFIG_ARCH_STACKWALK */
index e5da394d1ca3675ef6bc050660c1d5a0892915ca..c9ec050bcf46126286dba7d122b3a7240de6de65 100644 (file)
@@ -128,6 +128,7 @@ static int zero;
 static int __maybe_unused one = 1;
 static int __maybe_unused two = 2;
 static int __maybe_unused four = 4;
+static unsigned long zero_ul;
 static unsigned long one_ul = 1;
 static unsigned long long_max = LONG_MAX;
 static int one_hundred = 100;
@@ -1750,7 +1751,7 @@ static struct ctl_table fs_table[] = {
                .maxlen         = sizeof(files_stat.max_files),
                .mode           = 0644,
                .proc_handler   = proc_doulongvec_minmax,
-               .extra1         = &zero,
+               .extra1         = &zero_ul,
                .extra2         = &long_max,
        },
        {
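The old table pointed ->extra1 at an int-sized zero, while
proc_doulongvec_minmax() reads sizeof(unsigned long) bytes through it; on
64-bit the minimum therefore picked up four bytes of whatever followed the
int. A userspace-style illustration of the size mismatch (not kernel code):

	#include <stdio.h>

	static int zero;		/* 4 bytes: what ->extra1 pointed at */
	static unsigned long zero_ul;	/* 8 bytes: what it must point at */

	int main(void)
	{
		/* proc_doulongvec_minmax() dereferences ->extra1 as an
		 * unsigned long *, so the backing object must really be
		 * that wide.
		 */
		printf("int: %zu bytes, unsigned long: %zu bytes\n",
		       sizeof(zero), sizeof(zero_ul));
		return 0;
	}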
index 2c97e8c2d29fb3351332e447323750247d950f58..0519a8805aab3f290e3fb437ff0903a0e4722d90 100644 (file)
@@ -594,7 +594,7 @@ static ktime_t alarm_timer_remaining(struct k_itimer *timr, ktime_t now)
 {
        struct alarm *alarm = &timr->it.alarm.alarmtimer;
 
-       return ktime_sub(now, alarm->node.expires);
+       return ktime_sub(alarm->node.expires, now);
 }
 
 /**
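ktime_sub(a, b) computes a - b, so the remaining time of an armed alarm is
expires - now; the old argument order returned the negated value. A
one-function sketch of the corrected semantics (illustrative, not from the
patch):

	static ktime_t remaining_example(ktime_t expires)
	{
		ktime_t now = ktime_get();

		/* ktime_sub(a, b) == a - b: remaining must be expires - now. */
		return ktime_sub(expires, now);
	}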
index 95f8f3304c19696095a7a9492f735a9abbd498f4..d23b434c2ca7b0cef45643e3e7c0f42c16c1c84d 100644 (file)
@@ -89,7 +89,7 @@ struct clocksource * __init __weak clocksource_default_clock(void)
        return &clocksource_jiffies;
 }
 
-struct clocksource refined_jiffies;
+static struct clocksource refined_jiffies;
 
 int register_refined_jiffies(long cycles_per_second)
 {
index 16b80c2b4fe81d595264f6e3122b497a5cbf34cc..968e4b07918e78625691e9f11e9f38c626a18360 100644 (file)
@@ -272,7 +272,7 @@ static u64 notrace suspended_sched_clock_read(void)
        return cd.read_data[seq & 1].epoch_cyc;
 }
 
-static int sched_clock_suspend(void)
+int sched_clock_suspend(void)
 {
        struct clock_read_data *rd = &cd.read_data[0];
 
@@ -283,7 +283,7 @@ static int sched_clock_suspend(void)
        return 0;
 }
 
-static void sched_clock_resume(void)
+void sched_clock_resume(void)
 {
        struct clock_read_data *rd = &cd.read_data[0];
 
index 561641b2153f01f9b8c309cbdc5e0e80d3c548eb..59225b484e4ee00e12d61411a9120a4b68502da8 100644 (file)
@@ -46,6 +46,14 @@ ktime_t tick_period;
  *    procedure also covers cpu hotplug.
  */
 int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
+#ifdef CONFIG_NO_HZ_FULL
+/*
+ * tick_do_timer_boot_cpu indicates the boot CPU temporarily owns
+ * tick_do_timer_cpu and it should be taken over by an eligible secondary
+ * when one comes online.
+ */
+static int tick_do_timer_boot_cpu __read_mostly = -1;
+#endif
 
 /*
  * Debugging: see timer_list.c
@@ -167,6 +175,26 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
        }
 }
 
+#ifdef CONFIG_NO_HZ_FULL
+static void giveup_do_timer(void *info)
+{
+       int cpu = *(unsigned int *)info;
+
+       WARN_ON(tick_do_timer_cpu != smp_processor_id());
+
+       tick_do_timer_cpu = cpu;
+}
+
+static void tick_take_do_timer_from_boot(void)
+{
+       int cpu = smp_processor_id();
+       int from = tick_do_timer_boot_cpu;
+
+       if (from >= 0 && from != cpu)
+               smp_call_function_single(from, giveup_do_timer, &cpu, 1);
+}
+#endif
+
 /*
  * Setup the tick device
  */
@@ -186,12 +214,26 @@ static void tick_setup_device(struct tick_device *td,
                 * this cpu:
                 */
                if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
-                       if (!tick_nohz_full_cpu(cpu))
-                               tick_do_timer_cpu = cpu;
-                       else
-                               tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+                       tick_do_timer_cpu = cpu;
+
                        tick_next_period = ktime_get();
                        tick_period = NSEC_PER_SEC / HZ;
+#ifdef CONFIG_NO_HZ_FULL
+                       /*
+                        * The boot CPU may be nohz_full, in which case set
+                        * tick_do_timer_boot_cpu so the first housekeeping
+                        * secondary that comes up will take do_timer from
+                        * us.
+                        */
+                       if (tick_nohz_full_cpu(cpu))
+                               tick_do_timer_boot_cpu = cpu;
+
+               } else if (tick_do_timer_boot_cpu != -1 &&
+                                               !tick_nohz_full_cpu(cpu)) {
+                       tick_take_do_timer_from_boot();
+                       tick_do_timer_boot_cpu = -1;
+                       WARN_ON(tick_do_timer_cpu != cpu);
+#endif
                }
 
                /*
@@ -487,6 +529,7 @@ void tick_freeze(void)
                trace_suspend_resume(TPS("timekeeping_freeze"),
                                     smp_processor_id(), true);
                system_state = SYSTEM_SUSPEND;
+               sched_clock_suspend();
                timekeeping_suspend();
        } else {
                tick_suspend_local();
@@ -510,6 +553,7 @@ void tick_unfreeze(void)
 
        if (tick_freeze_depth == num_online_cpus()) {
                timekeeping_resume();
+               sched_clock_resume();
                system_state = SYSTEM_RUNNING;
                trace_suspend_resume(TPS("timekeeping_freeze"),
                                     smp_processor_id(), false);
index b50f6f22c88e584a7779eb4393a721d2ab06c772..bdf00c763ee326906cdc4e18d708bf3256fba9bd 100644 (file)
@@ -121,10 +121,16 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
         * into a long sleep. If two CPUs happen to assign themselves to
         * this duty, then the jiffies update is still serialized by
         * jiffies_lock.
+        *
+        * If nohz_full is enabled, this should not happen because the
+        * tick_do_timer_cpu never relinquishes the timekeeping duty.
         */
-       if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)
-           && !tick_nohz_full_cpu(cpu))
+       if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
+#ifdef CONFIG_NO_HZ_FULL
+               WARN_ON(tick_nohz_full_running);
+#endif
                tick_do_timer_cpu = cpu;
+       }
 #endif
 
        /* Check, if the jiffies need an update */
@@ -395,8 +401,8 @@ void __init tick_nohz_full_setup(cpumask_var_t cpumask)
 static int tick_nohz_cpu_down(unsigned int cpu)
 {
        /*
-        * The boot CPU handles housekeeping duty (unbound timers,
-        * workqueues, timekeeping, ...) on behalf of full dynticks
+        * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
+        * timers, workqueues, timekeeping, ...) on behalf of full dynticks
         * CPUs. It must remain online when nohz full is enabled.
         */
        if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
@@ -423,12 +429,15 @@ void __init tick_nohz_init(void)
                return;
        }
 
-       cpu = smp_processor_id();
+       if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
+                       !IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
+               cpu = smp_processor_id();
 
-       if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
-               pr_warn("NO_HZ: Clearing %d from nohz_full range for timekeeping\n",
-                       cpu);
-               cpumask_clear_cpu(cpu, tick_nohz_full_mask);
+               if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
+                       pr_warn("NO_HZ: Clearing %d from nohz_full range "
+                               "for timekeeping\n", cpu);
+                       cpumask_clear_cpu(cpu, tick_nohz_full_mask);
+               }
        }
 
        for_each_cpu(cpu, tick_nohz_full_mask)
@@ -905,8 +914,13 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
                /*
                 * Boot safety: make sure the timekeeping duty has been
                 * assigned before entering dyntick-idle mode,
+                * i.e. while tick_do_timer_cpu is still TICK_DO_TIMER_BOOT.
                 */
-               if (tick_do_timer_cpu == TICK_DO_TIMER_NONE)
+               if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_BOOT))
+                       return false;
+
+               /* Should not happen for nohz-full */
+               if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
                        return false;
        }
 
index 7a9b4eb7a1d5bde85e7a7b1c7747602cdd605975..141ab3ab0354f39fdb5daf7274e8d061d90a556c 100644 (file)
@@ -14,6 +14,13 @@ extern u64 timekeeping_max_deferment(void);
 extern void timekeeping_warp_clock(void);
 extern int timekeeping_suspend(void);
 extern void timekeeping_resume(void);
+#ifdef CONFIG_GENERIC_SCHED_CLOCK
+extern int sched_clock_suspend(void);
+extern void sched_clock_resume(void);
+#else
+static inline int sched_clock_suspend(void) { return 0; }
+static inline void sched_clock_resume(void) { }
+#endif
 
 extern void do_timer(unsigned long ticks);
 extern void update_wall_time(void);
index 8faa1a9aaeb978a5aeaf218a7096c3073683e08c..17b2be9bde12aa22a010788a2143eeedddc3a6c3 100644 (file)
@@ -88,6 +88,8 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
 
        if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
                return false;
+       if (num_online_cpus() <= 1)
+               return false;  /* Can't offline the last CPU. */
 
        if (verbose > 1)
                pr_alert("%s" TORTURE_FLAG
index fa79323331b22f327872ae41e185a17562f6d78a..b920358dd8f7f8cfcd226ba046e786901699c53a 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/list.h>
 #include <linux/hash.h>
 #include <linux/rcupdate.h>
+#include <linux/kprobes.h>
 
 #include <trace/events/sched.h>
 
@@ -1992,7 +1993,7 @@ static void print_bug_type(void)
  * modifying the code. @failed should be one of either:
  * EFAULT - if the problem happens on reading the @ip address
  * EINVAL - if what is read at @ip is not what was expected
- * EPERM - if the problem happens on writting to the @ip address
+ * EPERM - if the problem happens on writing to the @ip address
  */
 void ftrace_bug(int failed, struct dyn_ftrace *rec)
 {
@@ -2391,7 +2392,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
                return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
        }
 
-       return -1; /* unknow ftrace bug */
+       return -1; /* unknown ftrace bug */
 }
 
 void __weak ftrace_replace_code(int mod_flags)
@@ -3004,7 +3005,7 @@ ftrace_allocate_pages(unsigned long num_to_init)
        int cnt;
 
        if (!num_to_init)
-               return 0;
+               return NULL;
 
        start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
        if (!pg)
@@ -4755,7 +4756,7 @@ static int
 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
                int reset, int enable)
 {
-       return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
+       return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
 }
 
 /**
@@ -5463,7 +5464,7 @@ void ftrace_create_filter_files(struct ftrace_ops *ops,
 
 /*
  * The name "destroy_filter_files" is really a misnomer. Although
- * in the future, it may actualy delete the files, but this is
+ * in the future, it may actually delete the files, but this is
  * really intended to make sure the ops passed in are disabled
  * and that when this function returns, the caller is free to
  * free the ops.
@@ -5786,7 +5787,7 @@ void ftrace_module_enable(struct module *mod)
        /*
         * If the tracing is enabled, go ahead and enable the record.
         *
-        * The reason not to enable the record immediatelly is the
+        * The reason not to enable the record immediately is the
         * inherent check of ftrace_make_nop/ftrace_make_call for
         * correct previous instructions.  Making first the NOP
         * conversion puts the module to the correct state, thus
@@ -6246,7 +6247,7 @@ void ftrace_reset_array_ops(struct trace_array *tr)
        tr->ops->func = ftrace_stub;
 }
 
-static inline void
+static nokprobe_inline void
 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                       struct ftrace_ops *ignored, struct pt_regs *regs)
 {
@@ -6306,11 +6307,13 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 {
        __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
 }
+NOKPROBE_SYMBOL(ftrace_ops_list_func);
 #else
 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
 {
        __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
 }
+NOKPROBE_SYMBOL(ftrace_ops_no_ops);
 #endif
 
 /*
@@ -6337,6 +6340,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
        preempt_enable_notrace();
        trace_clear_recursion(bit);
 }
+NOKPROBE_SYMBOL(ftrace_ops_assist_func);
 
 /**
  * ftrace_ops_get_func - get the function a trampoline should call
index 41b6f96e5366231d72454e6c33015188066751ae..4ee8d8aa3d0fdcfe6dac6ea91c4ee96cc9330835 100644 (file)
@@ -762,7 +762,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 
        preempt_disable_notrace();
        time = rb_time_stamp(buffer);
-       preempt_enable_no_resched_notrace();
+       preempt_enable_notrace();
 
        return time;
 }
index 21153e64bf1c366033213e90272438ba171b2822..ec439999f38748090616406f77b93afc6f39b07a 100644 (file)
@@ -159,6 +159,8 @@ static union trace_eval_map_item *trace_eval_maps;
 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
 
 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
+static void ftrace_trace_userstack(struct ring_buffer *buffer,
+                                  unsigned long flags, int pc);
 
 #define MAX_TRACER_SIZE                100
 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
@@ -496,8 +498,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
         * not modified.
         */
        pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
-       if (!pid_list)
+       if (!pid_list) {
+               trace_parser_put(&parser);
                return -ENOMEM;
+       }
 
        pid_list->pid_max = READ_ONCE(pid_max);
 
@@ -507,6 +511,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
 
        pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
        if (!pid_list->pids) {
+               trace_parser_put(&parser);
                kfree(pid_list);
                return -ENOMEM;
        }
@@ -2749,12 +2754,21 @@ trace_function(struct trace_array *tr,
 
 #ifdef CONFIG_STACKTRACE
 
-#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
+/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
+#define FTRACE_KSTACK_NESTING  4
+
+#define FTRACE_KSTACK_ENTRIES  (PAGE_SIZE / FTRACE_KSTACK_NESTING)
+
 struct ftrace_stack {
-       unsigned long           calls[FTRACE_STACK_MAX_ENTRIES];
+       unsigned long           calls[FTRACE_KSTACK_ENTRIES];
 };
 
-static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
+
+struct ftrace_stacks {
+       struct ftrace_stack     stacks[FTRACE_KSTACK_NESTING];
+};
+
+static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
 
 static void __ftrace_trace_stack(struct ring_buffer *buffer,
@@ -2763,13 +2777,10 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 {
        struct trace_event_call *call = &event_kernel_stack;
        struct ring_buffer_event *event;
+       unsigned int size, nr_entries;
+       struct ftrace_stack *fstack;
        struct stack_entry *entry;
-       struct stack_trace trace;
-       int use_stack;
-       int size = FTRACE_STACK_ENTRIES;
-
-       trace.nr_entries        = 0;
-       trace.skip              = skip;
+       int stackidx;
 
        /*
         * Add one, for this function and the call to save_stack_trace()
@@ -2777,7 +2788,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
         */
 #ifndef CONFIG_UNWINDER_ORC
        if (!regs)
-               trace.skip++;
+               skip++;
 #endif
 
        /*
@@ -2788,53 +2799,40 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
         */
        preempt_disable_notrace();
 
-       use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
+       stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
+
+       /* This should never happen. If it does, yell once and skip */
+       if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
+               goto out;
+
        /*
-        * We don't need any atomic variables, just a barrier.
-        * If an interrupt comes in, we don't care, because it would
-        * have exited and put the counter back to what we want.
-        * We just need a barrier to keep gcc from moving things
-        * around.
+        * The above __this_cpu_inc_return() is 'atomic' cpu local. An
+        * interrupt will either see the value pre increment or post
+        * increment. If the interrupt happens pre increment it will have
+        * restored the counter when it returns.  We just need a barrier to
+        * keep gcc from moving things around.
         */
        barrier();
-       if (use_stack == 1) {
-               trace.entries           = this_cpu_ptr(ftrace_stack.calls);
-               trace.max_entries       = FTRACE_STACK_MAX_ENTRIES;
 
-               if (regs)
-                       save_stack_trace_regs(regs, &trace);
-               else
-                       save_stack_trace(&trace);
-
-               if (trace.nr_entries > size)
-                       size = trace.nr_entries;
-       } else
-               /* From now on, use_stack is a boolean */
-               use_stack = 0;
+       fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
+       size = ARRAY_SIZE(fstack->calls);
 
-       size *= sizeof(unsigned long);
+       if (regs) {
+               nr_entries = stack_trace_save_regs(regs, fstack->calls,
+                                                  size, skip);
+       } else {
+               nr_entries = stack_trace_save(fstack->calls, size, skip);
+       }
 
+       size = nr_entries * sizeof(unsigned long);
        event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
                                            sizeof(*entry) + size, flags, pc);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
 
-       memset(&entry->caller, 0, size);
-
-       if (use_stack)
-               memcpy(&entry->caller, trace.entries,
-                      trace.nr_entries * sizeof(unsigned long));
-       else {
-               trace.max_entries       = FTRACE_STACK_ENTRIES;
-               trace.entries           = entry->caller;
-               if (regs)
-                       save_stack_trace_regs(regs, &trace);
-               else
-                       save_stack_trace(&trace);
-       }
-
-       entry->size = trace.nr_entries;
+       memcpy(&entry->caller, fstack->calls, size);
+       entry->size = nr_entries;
 
        if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
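The reserve/release dance above generalizes to any per-CPU scratch space
shared across interrupt levels: each context that interrupts an in-progress
user gets its own slot, so no locking is needed. A hypothetical sketch with
made-up names (my_slot_reserve/my_slot_release), assuming the caller disables
preemption around the pair as __ftrace_trace_stack() does:

	#include <linux/bug.h>
	#include <linux/percpu.h>

	#define MY_NESTING 4	/* task, softirq, irq, NMI */

	struct my_slot { unsigned long data[256]; };
	struct my_slots { struct my_slot slot[MY_NESTING]; };

	static DEFINE_PER_CPU(struct my_slots, my_slots);
	static DEFINE_PER_CPU(int, my_reserve);

	static struct my_slot *my_slot_reserve(void)
	{
		int idx = __this_cpu_inc_return(my_reserve) - 1;

		if (WARN_ON_ONCE(idx >= MY_NESTING)) {
			__this_cpu_dec(my_reserve);
			return NULL;
		}
		/* Keep the compiler from moving accesses around idx. */
		barrier();
		return this_cpu_ptr(my_slots.slot) + idx;
	}

	static void my_slot_release(void)
	{
		barrier();
		__this_cpu_dec(my_reserve);
	}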
@@ -2904,15 +2902,15 @@ void trace_dump_stack(int skip)
 }
 EXPORT_SYMBOL_GPL(trace_dump_stack);
 
+#ifdef CONFIG_USER_STACKTRACE_SUPPORT
 static DEFINE_PER_CPU(int, user_stack_count);
 
-void
+static void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
        struct trace_event_call *call = &event_user_stack;
        struct ring_buffer_event *event;
        struct userstack_entry *entry;
-       struct stack_trace trace;
 
        if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
                return;
@@ -2943,12 +2941,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
        entry->tgid             = current->tgid;
        memset(&entry->caller, 0, sizeof(entry->caller));
 
-       trace.nr_entries        = 0;
-       trace.max_entries       = FTRACE_STACK_ENTRIES;
-       trace.skip              = 0;
-       trace.entries           = entry->caller;
-
-       save_stack_trace_user(&trace);
+       stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
        if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
 
@@ -2957,13 +2950,12 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
  out:
        preempt_enable();
 }
-
-#ifdef UNUSED
-static void __trace_userstack(struct trace_array *tr, unsigned long flags)
+#else /* CONFIG_USER_STACKTRACE_SUPPORT */
+static void ftrace_trace_userstack(struct ring_buffer *buffer,
+                                  unsigned long flags, int pc)
 {
-       ftrace_trace_userstack(tr, flags, preempt_count());
 }
-#endif /* UNUSED */
+#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
 
 #endif /* CONFIG_STACKTRACE */
 
@@ -7025,35 +7017,43 @@ struct buffer_ref {
        struct ring_buffer      *buffer;
        void                    *page;
        int                     cpu;
-       int                     ref;
+       refcount_t              refcount;
 };
 
+static void buffer_ref_release(struct buffer_ref *ref)
+{
+       if (!refcount_dec_and_test(&ref->refcount))
+               return;
+       ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
+       kfree(ref);
+}
+
 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
                                    struct pipe_buffer *buf)
 {
        struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 
-       if (--ref->ref)
-               return;
-
-       ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
-       kfree(ref);
+       buffer_ref_release(ref);
        buf->private = 0;
 }
 
-static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
+static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
                                struct pipe_buffer *buf)
 {
        struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 
-       ref->ref++;
+       if (refcount_read(&ref->refcount) > INT_MAX/2)
+               return false;
+
+       refcount_inc(&ref->refcount);
+       return true;
 }
 
 /* Pipe buffer operations for a buffer. */
 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
        .confirm                = generic_pipe_buf_confirm,
        .release                = buffer_pipe_buf_release,
-       .steal                  = generic_pipe_buf_steal,
+       .steal                  = generic_pipe_buf_nosteal,
        .get                    = buffer_pipe_buf_get,
 };
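With ->get() returning bool, callers can refuse a new reference instead of
letting the count grow without bound; the INT_MAX/2 check rejects long before
refcount_t would saturate. The same guarded-get pattern on a hypothetical
refcounted object (obj_get is a made-up name):

	#include <linux/kernel.h>
	#include <linux/refcount.h>

	struct obj {
		refcount_t refcount;
	};

	/* Mirrors buffer_pipe_buf_get(): refuse rather than risk runaway
	 * growth when userspace can trigger arbitrarily many gets.
	 */
	static bool obj_get(struct obj *o)
	{
		if (refcount_read(&o->refcount) > INT_MAX / 2)
			return false;
		refcount_inc(&o->refcount);
		return true;
	}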
 
@@ -7066,11 +7066,7 @@ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
        struct buffer_ref *ref =
                (struct buffer_ref *)spd->partial[i].private;
 
-       if (--ref->ref)
-               return;
-
-       ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
-       kfree(ref);
+       buffer_ref_release(ref);
        spd->partial[i].private = 0;
 }
 
@@ -7125,7 +7121,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                        break;
                }
 
-               ref->ref = 1;
+               refcount_set(&ref->refcount, 1);
                ref->buffer = iter->trace_buffer->buffer;
                ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
                if (IS_ERR(ref->page)) {
index d80cee49e0eb4e39afb0127a4541411334e0d285..639047b259d79b34c83ee47894cbc7ec9f40b608 100644 (file)
@@ -782,17 +782,9 @@ void update_max_tr_single(struct trace_array *tr,
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
 #ifdef CONFIG_STACKTRACE
-void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
-                           int pc);
-
 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
                   int pc);
 #else
-static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
-                                         unsigned long flags, int pc)
-{
-}
-
 static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
                                 int skip, int pc)
 {
index 4ad967453b6fb07a08a69534c8df6eebac1e2868..3ea65cdff30d50c831f53dffd7ee743ab735ca0a 100644 (file)
@@ -205,6 +205,8 @@ void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 void ftrace_likely_update(struct ftrace_likely_data *f, int val,
                          int expect, int is_constant)
 {
+       unsigned long flags = user_access_save();
+
        /* A constant is always correct */
        if (is_constant) {
                f->constant++;
@@ -223,6 +225,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
                f->data.correct++;
        else
                f->data.incorrect++;
+
+       user_access_restore(flags);
 }
 EXPORT_SYMBOL(ftrace_likely_update);
 
index dd1f43588d7097a62a84966d8a90b425fd0f2f64..fa100ed3b4de9d128cd05215c972828ee13fbb26 100644 (file)
@@ -74,7 +74,7 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type)
 static int create_dyn_event(int argc, char **argv)
 {
        struct dyn_event_operations *ops;
-       int ret;
+       int ret = -ENODEV;
 
        if (argv[0][0] == '-' || argv[0][0] == '!')
                return dyn_event_release(argc, argv, NULL);
index ca46339f30090d84d72667b4207a8272d7fa1e9f..a1d20421f4b033e037c6497ca00c22c2a1904d4a 100644 (file)
@@ -3713,7 +3713,6 @@ static void track_data_destroy(struct hist_trigger_data *hist_data,
        struct trace_event_file *file = hist_data->event_file;
 
        destroy_hist_field(data->track_data.track_var, 0);
-       destroy_hist_field(data->track_data.var_ref, 0);
 
        if (data->action == ACTION_SNAPSHOT) {
                struct track_data *track_data;
@@ -5187,7 +5186,6 @@ static void event_hist_trigger(struct event_trigger_data *data, void *rec,
        u64 var_ref_vals[TRACING_MAP_VARS_MAX];
        char compound_key[HIST_KEY_SIZE_MAX];
        struct tracing_map_elt *elt = NULL;
-       struct stack_trace stacktrace;
        struct hist_field *key_field;
        u64 field_contents;
        void *key = NULL;
@@ -5199,14 +5197,9 @@ static void event_hist_trigger(struct event_trigger_data *data, void *rec,
                key_field = hist_data->fields[i];
 
                if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
-                       stacktrace.max_entries = HIST_STACKTRACE_DEPTH;
-                       stacktrace.entries = entries;
-                       stacktrace.nr_entries = 0;
-                       stacktrace.skip = HIST_STACKTRACE_SKIP;
-
-                       memset(stacktrace.entries, 0, HIST_STACKTRACE_SIZE);
-                       save_stack_trace(&stacktrace);
-
+                       memset(entries, 0, HIST_STACKTRACE_SIZE);
+                       stack_trace_save(entries, HIST_STACKTRACE_DEPTH,
+                                        HIST_STACKTRACE_SKIP);
                        key = entries;
                } else {
                        field_contents = key_field->fn(key_field, elt, rbe, rec);
@@ -5247,7 +5240,7 @@ static void hist_trigger_stacktrace_print(struct seq_file *m,
        unsigned int i;
 
        for (i = 0; i < max_entries; i++) {
-               if (stacktrace_entries[i] == ULONG_MAX)
+               if (!stacktrace_entries[i])
                        return;
 
                seq_printf(m, "%*c", 1 + spaces, ' ');
index eec648a0d673bed8d8ccc9ff2491ba71d7710871..5d16f73898dbd2f851fe685a51e3e325d84ca9b0 100644 (file)
 
 #include "trace.h"
 
-static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
-        { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
-unsigned stack_trace_index[STACK_TRACE_ENTRIES];
+#define STACK_TRACE_ENTRIES 500
 
-/*
- * Reserve one entry for the passed in ip. This will allow
- * us to remove most or all of the stack size overhead
- * added by the stack tracer itself.
- */
-struct stack_trace stack_trace_max = {
-       .max_entries            = STACK_TRACE_ENTRIES - 1,
-       .entries                = &stack_dump_trace[0],
-};
+static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
+static unsigned stack_trace_index[STACK_TRACE_ENTRIES];
 
-unsigned long stack_trace_max_size;
-arch_spinlock_t stack_trace_max_lock =
+static unsigned int stack_trace_nr_entries;
+static unsigned long stack_trace_max_size;
+static arch_spinlock_t stack_trace_max_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 DEFINE_PER_CPU(int, disable_stack_tracer);
 static DEFINE_MUTEX(stack_sysctl_mutex);
 
 int stack_tracer_enabled;
-static int last_stack_tracer_enabled;
 
-void stack_trace_print(void)
+static void print_max_stack(void)
 {
        long i;
        int size;
 
        pr_emerg("        Depth    Size   Location    (%d entries)\n"
                           "        -----    ----   --------\n",
-                          stack_trace_max.nr_entries);
+                          stack_trace_nr_entries);
 
-       for (i = 0; i < stack_trace_max.nr_entries; i++) {
-               if (stack_dump_trace[i] == ULONG_MAX)
-                       break;
-               if (i+1 == stack_trace_max.nr_entries ||
-                               stack_dump_trace[i+1] == ULONG_MAX)
+       for (i = 0; i < stack_trace_nr_entries; i++) {
+               if (i + 1 == stack_trace_nr_entries)
                        size = stack_trace_index[i];
                else
                        size = stack_trace_index[i] - stack_trace_index[i+1];
@@ -65,16 +53,7 @@ void stack_trace_print(void)
        }
 }
 
-/*
- * When arch-specific code overrides this function, the following
- * data should be filled up, assuming stack_trace_max_lock is held to
- * prevent concurrent updates.
- *     stack_trace_index[]
- *     stack_trace_max
- *     stack_trace_max_size
- */
-void __weak
-check_stack(unsigned long ip, unsigned long *stack)
+static void check_stack(unsigned long ip, unsigned long *stack)
 {
        unsigned long this_size, flags; unsigned long *p, *top, *start;
        static int tracer_frame;
@@ -110,13 +89,12 @@ check_stack(unsigned long ip, unsigned long *stack)
 
        stack_trace_max_size = this_size;
 
-       stack_trace_max.nr_entries = 0;
-       stack_trace_max.skip = 0;
-
-       save_stack_trace(&stack_trace_max);
+       stack_trace_nr_entries = stack_trace_save(stack_dump_trace,
+                                              ARRAY_SIZE(stack_dump_trace) - 1,
+                                              0);
 
        /* Skip over the overhead of the stack tracer itself */
-       for (i = 0; i < stack_trace_max.nr_entries; i++) {
+       for (i = 0; i < stack_trace_nr_entries; i++) {
                if (stack_dump_trace[i] == ip)
                        break;
        }
@@ -125,7 +103,7 @@ check_stack(unsigned long ip, unsigned long *stack)
         * Some archs may not have the passed in ip in the dump.
         * If that happens, we need to show everything.
         */
-       if (i == stack_trace_max.nr_entries)
+       if (i == stack_trace_nr_entries)
                i = 0;
 
        /*
@@ -143,15 +121,13 @@ check_stack(unsigned long ip, unsigned long *stack)
         * loop will only happen once. This code only takes place
         * on a new max, so it is far from a fast path.
         */
-       while (i < stack_trace_max.nr_entries) {
+       while (i < stack_trace_nr_entries) {
                int found = 0;
 
                stack_trace_index[x] = this_size;
                p = start;
 
-               for (; p < top && i < stack_trace_max.nr_entries; p++) {
-                       if (stack_dump_trace[i] == ULONG_MAX)
-                               break;
+               for (; p < top && i < stack_trace_nr_entries; p++) {
                        /*
                         * The READ_ONCE_NOCHECK is used to let KASAN know that
                         * this is not a stack-out-of-bounds error.
@@ -182,12 +158,10 @@ check_stack(unsigned long ip, unsigned long *stack)
                        i++;
        }
 
-       stack_trace_max.nr_entries = x;
-       for (; x < i; x++)
-               stack_dump_trace[x] = ULONG_MAX;
+       stack_trace_nr_entries = x;
 
        if (task_stack_end_corrupted(current)) {
-               stack_trace_print();
+               print_max_stack();
                BUG();
        }
 
@@ -286,7 +260,7 @@ __next(struct seq_file *m, loff_t *pos)
 {
        long n = *pos - 1;
 
-       if (n >= stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
+       if (n >= stack_trace_nr_entries)
                return NULL;
 
        m->private = (void *)n;
@@ -350,7 +324,7 @@ static int t_show(struct seq_file *m, void *v)
                seq_printf(m, "        Depth    Size   Location"
                           "    (%d entries)\n"
                           "        -----    ----   --------\n",
-                          stack_trace_max.nr_entries);
+                          stack_trace_nr_entries);
 
                if (!stack_tracer_enabled && !stack_trace_max_size)
                        print_disabled(m);
@@ -360,12 +334,10 @@ static int t_show(struct seq_file *m, void *v)
 
        i = *(long *)v;
 
-       if (i >= stack_trace_max.nr_entries ||
-           stack_dump_trace[i] == ULONG_MAX)
+       if (i >= stack_trace_nr_entries)
                return 0;
 
-       if (i+1 == stack_trace_max.nr_entries ||
-           stack_dump_trace[i+1] == ULONG_MAX)
+       if (i + 1 == stack_trace_nr_entries)
                size = stack_trace_index[i];
        else
                size = stack_trace_index[i] - stack_trace_index[i+1];
@@ -422,23 +394,21 @@ stack_trace_sysctl(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp,
                   loff_t *ppos)
 {
+       int was_enabled;
        int ret;
 
        mutex_lock(&stack_sysctl_mutex);
+       was_enabled = !!stack_tracer_enabled;
 
        ret = proc_dointvec(table, write, buffer, lenp, ppos);
 
-       if (ret || !write ||
-           (last_stack_tracer_enabled == !!stack_tracer_enabled))
+       if (ret || !write || (was_enabled == !!stack_tracer_enabled))
                goto out;
 
-       last_stack_tracer_enabled = !!stack_tracer_enabled;
-
        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);
        else
                unregister_ftrace_function(&trace_ops);
-
  out:
        mutex_unlock(&stack_sysctl_mutex);
        return ret;
@@ -454,7 +424,6 @@ static __init int enable_stacktrace(char *str)
                strncpy(stack_trace_filter_buf, str + len, COMMAND_LINE_SIZE);
 
        stack_tracer_enabled = 1;
-       last_stack_tracer_enabled = 1;
        return 1;
 }
 __setup("stacktrace", enable_stacktrace);
index f93a56d2db275be64df083344b68ec65f3c32473..fa8fbff736d684734e89f05fb149c4e83436d8f4 100644 (file)
@@ -314,6 +314,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        unsigned long irq_flags;
+       unsigned long args[6];
        int pc;
        int syscall_nr;
        int size;
@@ -347,7 +348,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 
        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
-       syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
+       syscall_get_arguments(current, regs, args);
+       memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args);
 
        event_trigger_unlock_commit(trace_file, buffer, event, entry,
                                    irq_flags, pc);
@@ -583,6 +585,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
        struct syscall_metadata *sys_data;
        struct syscall_trace_enter *rec;
        struct hlist_head *head;
+       unsigned long args[6];
        bool valid_prog_array;
        int syscall_nr;
        int rctx;
@@ -613,8 +616,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
                return;
 
        rec->nr = syscall_nr;
-       syscall_get_arguments(current, regs, 0, sys_data->nb_args,
-                              (unsigned long *)&rec->args);
+       syscall_get_arguments(current, regs, args);
+       memcpy(&rec->args, args, sizeof(unsigned long) * sys_data->nb_args);
 
        if ((valid_prog_array &&
             !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) ||
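syscall_get_arguments() lost its (i, n) range parameters here: it now always
fills all six argument slots, and callers slice out nb_args themselves, as
both hunks above do. A hypothetical tracer hook under the new contract
(my_sys_enter is a made-up name):

	static void my_sys_enter(void *data, struct pt_regs *regs, long id)
	{
		unsigned long args[6];	/* always room for all six */

		syscall_get_arguments(current, regs, args);
		pr_debug("sys %ld: arg0=%lx arg1=%lx\n", id, args[0], args[1]);
	}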
index 8fbfda94a67be8e6fe587e5425d1773e351b7431..7f9e7b9306fe2488622f4b2fc6185419b69d549f 100644 (file)
@@ -42,9 +42,9 @@ int __read_mostly watchdog_user_enabled = 1;
 int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
 int __read_mostly soft_watchdog_user_enabled = 1;
 int __read_mostly watchdog_thresh = 10;
-int __read_mostly nmi_watchdog_available;
+static int __read_mostly nmi_watchdog_available;
 
-struct cpumask watchdog_allowed_mask __read_mostly;
+static struct cpumask watchdog_allowed_mask __read_mostly;
 
 struct cpumask watchdog_cpumask __read_mostly;
 unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
@@ -554,13 +554,15 @@ static void softlockup_start_all(void)
 
 int lockup_detector_online_cpu(unsigned int cpu)
 {
-       watchdog_enable(cpu);
+       if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
+               watchdog_enable(cpu);
        return 0;
 }
 
 int lockup_detector_offline_cpu(unsigned int cpu)
 {
-       watchdog_disable(cpu);
+       if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
+               watchdog_disable(cpu);
        return 0;
 }
 
@@ -588,7 +590,7 @@ static void lockup_detector_reconfigure(void)
  * Create the watchdog thread infrastructure and configure the detector(s).
  *
  * The threads are not unparked as watchdog_allowed_mask is empty.  When
- * the threads are sucessfully initialized, take the proper locks and
+ * the threads are successfully initialized, take the proper locks and
  * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
  */
 static __init void lockup_detector_setup(void)
index 71381168dedef4e88382a1849412f554a4cb4a56..247bf0b1582ca1cf352f006aa1fd5f689f8f5859 100644 (file)
@@ -135,7 +135,8 @@ static void watchdog_overflow_callback(struct perf_event *event,
                if (__this_cpu_read(hard_watchdog_warn) == true)
                        return;
 
-               pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+               pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n",
+                        this_cpu);
                print_modules();
                print_irqtrace_events(current);
                if (regs)
index 4026d1871407ecbc78dbeaff6c88b2b41aa56647..56180c9286f50e01eb3f4420d030161d6971ed5b 100644 (file)
@@ -841,43 +841,32 @@ static void wake_up_worker(struct worker_pool *pool)
 }
 
 /**
- * wq_worker_waking_up - a worker is waking up
+ * wq_worker_running - a worker is running again
  * @task: task waking up
- * @cpu: CPU @task is waking up to
  *
- * This function is called during try_to_wake_up() when a worker is
- * being awoken.
- *
- * CONTEXT:
- * spin_lock_irq(rq->lock)
+ * This function is called when a worker returns from schedule()
  */
-void wq_worker_waking_up(struct task_struct *task, int cpu)
+void wq_worker_running(struct task_struct *task)
 {
        struct worker *worker = kthread_data(task);
 
-       if (!(worker->flags & WORKER_NOT_RUNNING)) {
-               WARN_ON_ONCE(worker->pool->cpu != cpu);
+       if (!worker->sleeping)
+               return;
+       if (!(worker->flags & WORKER_NOT_RUNNING))
                atomic_inc(&worker->pool->nr_running);
-       }
+       worker->sleeping = 0;
 }
 
 /**
  * wq_worker_sleeping - a worker is going to sleep
  * @task: task going to sleep
  *
- * This function is called during schedule() when a busy worker is
- * going to sleep.  Worker on the same cpu can be woken up by
- * returning pointer to its task.
- *
- * CONTEXT:
- * spin_lock_irq(rq->lock)
- *
- * Return:
- * Worker task on @cpu to wake up, %NULL if none.
+ * This function is called from schedule() when a busy worker is
+ * going to sleep.
  */
-struct task_struct *wq_worker_sleeping(struct task_struct *task)
+void wq_worker_sleeping(struct task_struct *task)
 {
-       struct worker *worker = kthread_data(task), *to_wakeup = NULL;
+       struct worker *next, *worker = kthread_data(task);
        struct worker_pool *pool;
 
        /*
@@ -886,13 +875,15 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
         * checking NOT_RUNNING.
         */
        if (worker->flags & WORKER_NOT_RUNNING)
-               return NULL;
+               return;
 
        pool = worker->pool;
 
-       /* this can only happen on the local cpu */
-       if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id()))
-               return NULL;
+       if (WARN_ON_ONCE(worker->sleeping))
+               return;
+
+       worker->sleeping = 1;
+       spin_lock_irq(&pool->lock);
 
        /*
         * The counterpart of the following dec_and_test, implied mb,
@@ -906,9 +897,12 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
         * lock is safe.
         */
        if (atomic_dec_and_test(&pool->nr_running) &&
-           !list_empty(&pool->worklist))
-               to_wakeup = first_idle_worker(pool);
-       return to_wakeup ? to_wakeup->task : NULL;
+           !list_empty(&pool->worklist)) {
+               next = first_idle_worker(pool);
+               if (next)
+                       wake_up_process(next->task);
+       }
+       spin_unlock_irq(&pool->lock);
 }
 
 /**
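These hooks are now called from plain scheduler context rather than under
rq->lock. Roughly how the scheduler side pairs with them after this series (a
sketch of sched_submit_work()/sched_update_worker() in kernel/sched/core.c;
details elided):

	static inline void sched_submit_work(struct task_struct *tsk)
	{
		/* Called on the way into schedule(), before rq->lock. */
		if (tsk->flags & PF_WQ_WORKER)
			wq_worker_sleeping(tsk);	/* may take pool->lock */
	}

	static void sched_update_worker(struct task_struct *tsk)
	{
		/* Called after schedule() returns, no scheduler locks held. */
		if (tsk->flags & PF_WQ_WORKER)
			wq_worker_running(tsk);
	}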
@@ -4266,7 +4260,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
        INIT_LIST_HEAD(&wq->list);
 
        if (alloc_and_link_pwqs(wq) < 0)
-               goto err_free_wq;
+               goto err_unreg_lockdep;
 
        if (wq_online && init_rescuer(wq) < 0)
                goto err_destroy;
@@ -4292,9 +4286,10 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 
        return wq;
 
-err_free_wq:
+err_unreg_lockdep:
        wq_unregister_lockdep(wq);
        wq_free_lockdep(wq);
+err_free_wq:
        free_workqueue_attrs(wq->unbound_attrs);
        kfree(wq);
        return NULL;
@@ -4928,7 +4923,7 @@ static void rebind_workers(struct worker_pool *pool)
                 *
                 * WRITE_ONCE() is necessary because @worker->flags may be
                 * tested without holding any lock in
-                * wq_worker_waking_up().  Without it, NOT_RUNNING test may
+                * wq_worker_running().  Without it, NOT_RUNNING test may
                 * fail incorrectly leading to premature concurrency
                 * management operations.
                 */
index cb68b03ca89aaf074821a1dfe5a2152c31d3a9d3..498de0e909a438b6bef54e2a471270bc51e9e19c 100644 (file)
@@ -44,6 +44,7 @@ struct worker {
        unsigned long           last_active;    /* L: last active timestamp */
        unsigned int            flags;          /* X: flags */
        int                     id;             /* I: worker id */
+       int                     sleeping;       /* None */
 
        /*
         * Opaque string set with work_set_desc().  Printed out with task
@@ -72,8 +73,8 @@ static inline struct worker *current_wq_worker(void)
  * Scheduler hooks for concurrency managed workqueue.  Only to be used from
  * sched/ and workqueue.c.
  */
-void wq_worker_waking_up(struct task_struct *task, int cpu);
-struct task_struct *wq_worker_sleeping(struct task_struct *task);
+void wq_worker_running(struct task_struct *task);
+void wq_worker_sleeping(struct task_struct *task);
 work_func_t wq_worker_last_func(struct task_struct *task);
 
 #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
index a9e56539bd11687881ec3e43c1e11cd85d8a4157..e86975bfca6ab72e3518954413ae97ad44e334bd 100644 (file)
@@ -597,6 +597,10 @@ config ARCH_HAS_UACCESS_FLUSHCACHE
 config ARCH_HAS_UACCESS_MCSAFE
        bool
 
+# Temporary. Goes away when all archs are cleaned up
+config ARCH_STACKWALK
+       bool
+
 config STACKDEPOT
        bool
        select STACKTRACE
index 0d9e81779e373745c3e28497df424b415a50c3ac..d5a4a4036d2f83db9df1c5e2183380f900e62ca3 100644 (file)
@@ -753,9 +753,9 @@ endmenu # "Memory Debugging"
 config ARCH_HAS_KCOV
        bool
        help
-         KCOV does not have any arch-specific code, but currently it is enabled
-         only for x86_64. KCOV requires testing on other archs, and most likely
-         disabling of instrumentation for some early boot code.
+         An architecture should select this when it can successfully
+         build and run with CONFIG_KCOV. This typically requires
+         disabling instrumentation for some early boot code.
 
 config CC_HAS_SANCOV_TRACE_PC
        def_bool $(cc-option,-fsanitize-coverage=trace-pc)
@@ -1929,6 +1929,7 @@ config TEST_KMOD
        depends on m
        depends on BLOCK && (64BIT || LBDAF)      # for XFS, BTRFS
        depends on NETDEVICES && NET_CORE && INET # for TUN
+       depends on BLOCK
        select TEST_LKM
        select XFS_FS
        select TUN
index 3b08673e8881a42c9abcd67269455f69fbd9ceb0..e16e7aadc41a5358e8fdc08cd585a1f6c7c9539a 100644 (file)
@@ -17,6 +17,17 @@ KCOV_INSTRUMENT_list_debug.o := n
 KCOV_INSTRUMENT_debugobjects.o := n
 KCOV_INSTRUMENT_dynamic_debug.o := n
 
+# Early boot use of cmdline, don't instrument it
+ifdef CONFIG_AMD_MEM_ENCRYPT
+KASAN_SANITIZE_string.o := n
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_string.o = -pg
+endif
+
+CFLAGS_string.o := $(call cc-option, -fno-stack-protector)
+endif
+
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
         rbtree.o radix-tree.o timerqueue.o xarray.o \
         idr.o int_sqrt.o extable.o \
@@ -268,6 +279,7 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
 obj-$(CONFIG_UBSAN) += ubsan.o
 
 UBSAN_SANITIZE_ubsan.o := n
+CFLAGS_ubsan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 
 obj-$(CONFIG_SBITMAP) += sbitmap.o
 
index cf7b129b0b2b08adcc1aae98f990c384761532dc..e26aa4f65eb9650111a847023f07d45091d442d3 100644 (file)
@@ -65,22 +65,16 @@ static bool fail_task(struct fault_attr *attr, struct task_struct *task)
 
 static bool fail_stacktrace(struct fault_attr *attr)
 {
-       struct stack_trace trace;
        int depth = attr->stacktrace_depth;
        unsigned long entries[MAX_STACK_TRACE_DEPTH];
-       int n;
+       int n, nr_entries;
        bool found = (attr->require_start == 0 && attr->require_end == ULONG_MAX);
 
        if (depth == 0)
                return found;
 
-       trace.nr_entries = 0;
-       trace.entries = entries;
-       trace.max_entries = depth;
-       trace.skip = 1;
-
-       save_stack_trace(&trace);
-       for (n = 0; n < trace.nr_entries; n++) {
+       nr_entries = stack_trace_save(entries, depth, 1);
+       for (n = 0; n < nr_entries; n++) {
                if (attr->reject_start <= entries[n] &&
                               entries[n] < attr->reject_end)
                        return false;
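
This hunk is typical of the series-wide conversion to the new arch-agnostic stacktrace API: a plain entry array and a count replace struct stack_trace. A minimal sketch, with an arbitrary array size and skip depth:

	unsigned long entries[16];
	unsigned int nr;

	/* Save up to 16 entries, skipping the immediate caller. */
	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	/* Print them with no extra indentation. */
	stack_trace_print(entries, nr, 0);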
index ea36dc355da131b4a45b71d8be6f1bc69a53e637..b396d328a7643b7c1984b8e87df9da88c45e7470 100644 (file)
@@ -1528,6 +1528,7 @@ EXPORT_SYMBOL(csum_and_copy_to_iter);
 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
                struct iov_iter *i)
 {
+#ifdef CONFIG_CRYPTO
        struct ahash_request *hash = hashp;
        struct scatterlist sg;
        size_t copied;
@@ -1537,6 +1538,9 @@ size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
        ahash_request_set_crypt(hash, &sg, NULL, copied);
        crypto_ahash_update(hash);
        return copied;
+#else
+       return 0;
+#endif
 }
 EXPORT_SYMBOL(hash_and_copy_to_iter);
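
A hedged caller sketch; the ahash request setup (hash_req) and the buffer names are assumed and elided, and with CONFIG_CRYPTO=n the function now reports zero bytes copied:

	size_t n = hash_and_copy_to_iter(kbuf, len, hash_req, &iter);

	if (n != len)
		return -EFAULT;	/* short copy (always the case if CONFIG_CRYPTO=n) */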
 
index 4525fb09484427297853ca5819dcfaffee9265f1..a8ede77afe0db70fa7992319c470c2a65c07bf58 100644 (file)
@@ -291,13 +291,14 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
 {
        const unsigned char *ip = in;
        unsigned char *op = out;
+       unsigned char *data_start;
        size_t l = in_len;
        size_t t = 0;
        signed char state_offset = -2;
        unsigned int m4_max_offset;
 
-       // LZO v0 will never write 17 as first byte,
-       // so this is used to version the bitstream
+       // LZO v0 will never write 17 as first byte (except for zero-length
+       // input), so this is used to version the bitstream
        if (bitstream_version > 0) {
                *op++ = 17;
                *op++ = bitstream_version;
@@ -306,6 +307,8 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
                m4_max_offset = M4_MAX_OFFSET_V0;
        }
 
+       data_start = op;
+
        while (l > 20) {
                size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1);
                uintptr_t ll_end = (uintptr_t) ip + ll;
@@ -324,7 +327,7 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
        if (t > 0) {
                const unsigned char *ii = in + in_len - t;
 
-               if (op == out && t <= 238) {
+               if (op == data_start && t <= 238) {
                        *op++ = (17 + t);
                } else if (t <= 3) {
                        op[state_offset] |= t;
index 6d2600ea3b5547efa35ae1572e83fc56f0a325ad..9e07e9ef1aad7e7f8b0044f6bf954ff2f4ae9099 100644 (file)
@@ -54,11 +54,9 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
        if (unlikely(in_len < 3))
                goto input_overrun;
 
-       if (likely(*ip == 17)) {
+       if (likely(in_len >= 5) && likely(*ip == 17)) {
                bitstream_version = ip[1];
                ip += 2;
-               if (unlikely(in_len < 5))
-                       goto input_overrun;
        } else {
                bitstream_version = 0;
        }
index 0a105d4af16644bcbdf8a4f62f295d3ac534e31d..97f59abc3e92583917769f232bdd9f7d5e67520f 100644 (file)
@@ -416,8 +416,12 @@ static void rht_deferred_worker(struct work_struct *work)
        else if (tbl->nest)
                err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
 
-       if (!err)
-               err = rhashtable_rehash_table(ht);
+       if (!err || err == -EEXIST) {
+               int nerr;
+
+               nerr = rhashtable_rehash_table(ht);
+               err = err ?: nerr;
+       }
 
        mutex_unlock(&ht->mutex);
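
The err = err ?: nerr line uses the GCC conditional with an omitted middle operand: the first non-zero error is kept, otherwise the rehash result is adopted. It is equivalent to:

	if (!err)
		err = nerr;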
 
index 5b382c1244ede33c14016142ac2d7fec4d0608da..155fe38756ecfda251f26fa8616a325dddd8d455 100644 (file)
@@ -591,6 +591,17 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
 void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
                         unsigned int cpu)
 {
+       /*
+        * Once the clear bit is set, the bit may be re-allocated.
+        *
+        * Orders READ/WRITE on the associated instance (such as the
+        * request of blk_mq) by this bit to avoid racing with re-allocation,
+        * and its pair is the memory barrier implied in __sbitmap_get_word.
+        *
+        * One invariant is that the clear bit has to be zero when the bit
+        * is in use.
+        */
+       smp_mb__before_atomic();
        sbitmap_deferred_clear_bit(&sbq->sb, nr);
 
        /*
index e513459a5601a5d19a5ad7461d8b137cd2d7a469..605c61f65d94b1e302bb59b1b29b9897213bd428 100644 (file)
@@ -194,40 +194,52 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
        return NULL;
 }
 
-void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
+/**
+ * stack_depot_fetch - Fetch stack entries from a depot
+ *
+ * @handle:            Stack depot handle which was returned from
+ *                     stack_depot_save().
+ * @entries:           Pointer to store the entries address
+ *
+ * Return: The number of trace entries for this depot.
+ */
+unsigned int stack_depot_fetch(depot_stack_handle_t handle,
+                              unsigned long **entries)
 {
        union handle_parts parts = { .handle = handle };
        void *slab = stack_slabs[parts.slabindex];
        size_t offset = parts.offset << STACK_ALLOC_ALIGN;
        struct stack_record *stack = slab + offset;
 
-       trace->nr_entries = trace->max_entries = stack->size;
-       trace->entries = stack->entries;
-       trace->skip = 0;
+       *entries = stack->entries;
+       return stack->size;
 }
-EXPORT_SYMBOL_GPL(depot_fetch_stack);
+EXPORT_SYMBOL_GPL(stack_depot_fetch);
 
 /**
- * depot_save_stack - save stack in a stack depot.
- * @trace - the stacktrace to save.
- * @alloc_flags - flags for allocating additional memory if required.
+ * stack_depot_save - Save a stack trace from an array
+ *
+ * @entries:           Pointer to storage array
+ * @nr_entries:                Size of the storage array
+ * @alloc_flags:       Allocation gfp flags
  *
- * Returns the handle of the stack struct stored in depot.
+ * Return: The handle of the stack struct stored in depot
  */
-depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
-                                   gfp_t alloc_flags)
+depot_stack_handle_t stack_depot_save(unsigned long *entries,
+                                     unsigned int nr_entries,
+                                     gfp_t alloc_flags)
 {
-       u32 hash;
-       depot_stack_handle_t retval = 0;
        struct stack_record *found = NULL, **bucket;
-       unsigned long flags;
+       depot_stack_handle_t retval = 0;
        struct page *page = NULL;
        void *prealloc = NULL;
+       unsigned long flags;
+       u32 hash;
 
-       if (unlikely(trace->nr_entries == 0))
+       if (unlikely(nr_entries == 0))
                goto fast_exit;
 
-       hash = hash_stack(trace->entries, trace->nr_entries);
+       hash = hash_stack(entries, nr_entries);
        bucket = &stack_table[hash & STACK_HASH_MASK];
 
        /*
@@ -235,8 +247,8 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
         * The smp_load_acquire() here pairs with smp_store_release() to
         * |bucket| below.
         */
-       found = find_stack(smp_load_acquire(bucket), trace->entries,
-                          trace->nr_entries, hash);
+       found = find_stack(smp_load_acquire(bucket), entries,
+                          nr_entries, hash);
        if (found)
                goto exit;
 
@@ -264,10 +276,10 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
 
        spin_lock_irqsave(&depot_lock, flags);
 
-       found = find_stack(*bucket, trace->entries, trace->nr_entries, hash);
+       found = find_stack(*bucket, entries, nr_entries, hash);
        if (!found) {
                struct stack_record *new =
-                       depot_alloc_stack(trace->entries, trace->nr_entries,
+                       depot_alloc_stack(entries, nr_entries,
                                          hash, &prealloc, alloc_flags);
                if (new) {
                        new->next = *bucket;
@@ -297,4 +309,4 @@ exit:
 fast_exit:
        return retval;
 }
-EXPORT_SYMBOL_GPL(depot_save_stack);
+EXPORT_SYMBOL_GPL(stack_depot_save);
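
Combined with the stacktrace API change above, a round trip through the depot now looks like this sketch (array size and GFP flags are illustrative):

	unsigned long entries[16], *saved;
	unsigned int nr;
	depot_stack_handle_t handle;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	handle = stack_depot_save(entries, nr, GFP_KERNEL);
	if (handle) {
		nr = stack_depot_fetch(handle, &saved);
		stack_trace_print(saved, nr, 0);
	}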
index 38e4ca08e757cbb9bfa7b551c86062fa252a2d94..3ab861c1a857ad1d75bfdd59ff61dc51d1c7350d 100644 (file)
@@ -866,6 +866,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
 EXPORT_SYMBOL(memcmp);
 #endif
 
+#ifndef __HAVE_ARCH_BCMP
+/**
+ * bcmp - returns 0 if and only if the buffers have identical contents.
+ * @a: pointer to first buffer.
+ * @b: pointer to second buffer.
+ * @len: size of buffers.
+ *
+ * The sign or magnitude of a non-zero return value has no particular
+ * meaning, and architectures may implement their own more efficient bcmp(). So
+ * while this particular implementation is a simple (tail) call to memcmp, do
+ * not rely on anything but whether the return value is zero or non-zero.
+ */
+#undef bcmp
+int bcmp(const void *a, const void *b, size_t len)
+{
+       return memcmp(a, b, len);
+}
+EXPORT_SYMBOL(bcmp);
+#endif
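
As the kernel-doc stresses, only the zero/non-zero distinction of bcmp() is defined. A short sketch (buf_a, buf_b, len and the handlers are placeholders):

	/* Correct: test for equality only. */
	if (!bcmp(buf_a, buf_b, len))
		handle_match();

	/* Wrong: bcmp() guarantees no ordering, unlike memcmp(). */
	if (bcmp(buf_a, buf_b, len) < 0)
		handle_less_than();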
+
 #ifndef __HAVE_ARCH_MEMSCAN
 /**
  * memscan - Find a character in an area of memory.
index 58eacd41526c58339a7cb35ef92a618f0f3517e4..023ba9f3b99f0eca15e09d735be03a1d42e447ba 100644 (file)
  * hit it), 'max' is the address space maximum (and we return
  * -EFAULT if we hit it).
  */
-static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
+static inline long do_strncpy_from_user(char *dst, const char __user *src,
+                                       unsigned long count, unsigned long max)
 {
        const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
-       long res = 0;
+       unsigned long res = 0;
 
        /*
         * Truncate 'max' to the user-specified limit, so that
index 1c1a1b0e38a5f5c853cf935ed06eb9abb2b56ef2..7f2db3fe311fdd49613912174b69e952413fa1a8 100644 (file)
@@ -28,7 +28,7 @@
 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
 {
        const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
-       long align, res = 0;
+       unsigned long align, res = 0;
        unsigned long c;
 
        /*
@@ -42,7 +42,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
         * Do everything aligned. But that means that we
         * need to also expand the maximum..
         */
-       align = (sizeof(long) - 1) & (unsigned long)src;
+       align = (sizeof(unsigned long) - 1) & (unsigned long)src;
        src -= align;
        max += align;
 
index 1a7077f20eae4079a25aae3c4d6703edaffd8d65..fb328e7ccb0893136e949f3581cc8b647f414de2 100644 (file)
@@ -5,16 +5,14 @@
 #include <linux/export.h>
 #include <asm/syscall.h>
 
-static int collect_syscall(struct task_struct *target, long *callno,
-                          unsigned long args[6], unsigned int maxargs,
-                          unsigned long *sp, unsigned long *pc)
+static int collect_syscall(struct task_struct *target, struct syscall_info *info)
 {
        struct pt_regs *regs;
 
        if (!try_get_task_stack(target)) {
                /* Task has no stack, so the task isn't in a syscall. */
-               *sp = *pc = 0;
-               *callno = -1;
+               memset(info, 0, sizeof(*info));
+               info->data.nr = -1;
                return 0;
        }
 
@@ -24,12 +22,13 @@ static int collect_syscall(struct task_struct *target, long *callno,
                return -EAGAIN;
        }
 
-       *sp = user_stack_pointer(regs);
-       *pc = instruction_pointer(regs);
+       info->sp = user_stack_pointer(regs);
+       info->data.instruction_pointer = instruction_pointer(regs);
 
-       *callno = syscall_get_nr(target, regs);
-       if (*callno != -1L && maxargs > 0)
-               syscall_get_arguments(target, regs, 0, maxargs, args);
+       info->data.nr = syscall_get_nr(target, regs);
+       if (info->data.nr != -1L)
+               syscall_get_arguments(target, regs,
+                                     (unsigned long *)&info->data.args[0]);
 
        put_task_stack(target);
        return 0;
@@ -38,41 +37,35 @@ static int collect_syscall(struct task_struct *target, long *callno,
 /**
  * task_current_syscall - Discover what a blocked task is doing.
  * @target:            thread to examine
- * @callno:            filled with system call number or -1
- * @args:              filled with @maxargs system call arguments
- * @maxargs:           number of elements in @args to fill
- * @sp:                        filled with user stack pointer
- * @pc:                        filled with user PC
+ * @info:              structure with the following fields:
+ *                      .sp        - filled with user stack pointer
+ *                      .data.nr   - filled with system call number or -1
+ *                      .data.args - filled with the system call arguments
+ *                      .data.instruction_pointer - filled with user PC
  *
- * If @target is blocked in a system call, returns zero with *@callno
- * set to the the call's number and @args filled in with its arguments.
- * Registers not used for system call arguments may not be available and
- * it is not kosher to use &struct user_regset calls while the system
+ * If @target is blocked in a system call, returns zero with @info.data.nr
+ * set to the call's number and @info.data.args filled in with its
+ * arguments. Registers not used for system call arguments may not be available
+ * and it is not kosher to use &struct user_regset calls while the system
  * call is still in progress.  Note we may get this result if @target
  * has finished its system call but not yet returned to user mode, such
  * as when it's stopped for signal handling or syscall exit tracing.
  *
  * If @target is blocked in the kernel during a fault or exception,
- * returns zero with *@callno set to -1 and does not fill in @args.
- * If so, it's now safe to examine @target using &struct user_regset
- * get() calls as long as we're sure @target won't return to user mode.
+ * returns zero with @info.data.nr set to -1 and does not fill in
+ * @info.data.args. If so, it's now safe to examine @target using
+ * &struct user_regset get() calls as long as we're sure @target won't return
+ * to user mode.
  *
  * Returns -%EAGAIN if @target does not remain blocked.
- *
- * Returns -%EINVAL if @maxargs is too large (maximum is six).
  */
-int task_current_syscall(struct task_struct *target, long *callno,
-                        unsigned long args[6], unsigned int maxargs,
-                        unsigned long *sp, unsigned long *pc)
+int task_current_syscall(struct task_struct *target, struct syscall_info *info)
 {
        long state;
        unsigned long ncsw;
 
-       if (unlikely(maxargs > 6))
-               return -EINVAL;
-
        if (target == current)
-               return collect_syscall(target, callno, args, maxargs, sp, pc);
+               return collect_syscall(target, info);
 
        state = target->state;
        if (unlikely(!state))
@@ -80,7 +73,7 @@ int task_current_syscall(struct task_struct *target, long *callno,
 
        ncsw = wait_task_inactive(target, state);
        if (unlikely(!ncsw) ||
-           unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) ||
+           unlikely(collect_syscall(target, info)) ||
            unlikely(wait_task_inactive(target, state) != ncsw))
                return -EAGAIN;
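
A hedged caller sketch for the single-struct interface; field names follow the kernel-doc above, and the seccomp_data-style layout of @info.data is an assumption:

	struct syscall_info info;

	if (!task_current_syscall(target, &info) && info.data.nr != -1)
		pr_info("pid %d blocked in syscall %d, sp=0x%llx\n",
			task_pid_nr(target), info.data.nr,
			(unsigned long long)info.sp);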
 
index 83cdcaa82bf6cbc9a78640795bc2bbd869ba92bf..f832b095afba011293b6a3da18a2170b55501325 100644 (file)
@@ -383,14 +383,14 @@ static void shuffle_array(int *arr, int n)
 static int test_func(void *private)
 {
        struct test_driver *t = private;
-       cpumask_t newmask = CPU_MASK_NONE;
        int random_array[ARRAY_SIZE(test_case_array)];
        int index, i, j, ret;
        ktime_t kt;
        u64 delta;
 
-       cpumask_set_cpu(t->cpu, &newmask);
-       set_cpus_allowed_ptr(current, &newmask);
+       ret = set_cpus_allowed_ptr(current, cpumask_of(t->cpu));
+       if (ret < 0)
+               pr_err("Failed to set affinity to %d CPU\n", t->cpu);
 
        for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
                random_array[i] = i;
index e4162f59a81ccacda275cd218193fb2ad34d71d3..ecc1793380946ee4cd81c848d84eba284e523e2e 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/sched.h>
+#include <linux/uaccess.h>
 
 #include "ubsan.h"
 
@@ -86,11 +87,13 @@ static bool is_inline_int(struct type_descriptor *type)
        return bits <= inline_bits;
 }
 
-static s_max get_signed_val(struct type_descriptor *type, unsigned long val)
+static s_max get_signed_val(struct type_descriptor *type, void *val)
 {
        if (is_inline_int(type)) {
                unsigned extra_bits = sizeof(s_max)*8 - type_bit_width(type);
-               return ((s_max)val) << extra_bits >> extra_bits;
+               unsigned long ulong_val = (unsigned long)val;
+
+               return ((s_max)ulong_val) << extra_bits >> extra_bits;
        }
 
        if (type_bit_width(type) == 64)
@@ -99,15 +102,15 @@ static s_max get_signed_val(struct type_descriptor *type, unsigned long val)
        return *(s_max *)val;
 }
 
-static bool val_is_negative(struct type_descriptor *type, unsigned long val)
+static bool val_is_negative(struct type_descriptor *type, void *val)
 {
        return type_is_signed(type) && get_signed_val(type, val) < 0;
 }
 
-static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val)
+static u_max get_unsigned_val(struct type_descriptor *type, void *val)
 {
        if (is_inline_int(type))
-               return val;
+               return (unsigned long)val;
 
        if (type_bit_width(type) == 64)
                return *(u64 *)val;
@@ -116,7 +119,7 @@ static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val)
 }
 
 static void val_to_string(char *str, size_t size, struct type_descriptor *type,
-       unsigned long value)
+                       void *value)
 {
        if (type_is_int(type)) {
                if (type_bit_width(type) == 128) {
@@ -163,8 +166,8 @@ static void ubsan_epilogue(unsigned long *flags)
        current->in_ubsan--;
 }
 
-static void handle_overflow(struct overflow_data *data, unsigned long lhs,
-                       unsigned long rhs, char op)
+static void handle_overflow(struct overflow_data *data, void *lhs,
+                       void *rhs, char op)
 {
 
        struct type_descriptor *type = data->type;
@@ -191,8 +194,7 @@ static void handle_overflow(struct overflow_data *data, unsigned long lhs,
 }
 
 void __ubsan_handle_add_overflow(struct overflow_data *data,
-                               unsigned long lhs,
-                               unsigned long rhs)
+                               void *lhs, void *rhs)
 {
 
        handle_overflow(data, lhs, rhs, '+');
@@ -200,23 +202,21 @@ void __ubsan_handle_add_overflow(struct overflow_data *data,
 EXPORT_SYMBOL(__ubsan_handle_add_overflow);
 
 void __ubsan_handle_sub_overflow(struct overflow_data *data,
-                               unsigned long lhs,
-                               unsigned long rhs)
+                               void *lhs, void *rhs)
 {
        handle_overflow(data, lhs, rhs, '-');
 }
 EXPORT_SYMBOL(__ubsan_handle_sub_overflow);
 
 void __ubsan_handle_mul_overflow(struct overflow_data *data,
-                               unsigned long lhs,
-                               unsigned long rhs)
+                               void *lhs, void *rhs)
 {
        handle_overflow(data, lhs, rhs, '*');
 }
 EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
 
 void __ubsan_handle_negate_overflow(struct overflow_data *data,
-                               unsigned long old_val)
+                               void *old_val)
 {
        unsigned long flags;
        char old_val_str[VALUE_LENGTH];
@@ -237,8 +237,7 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
 
 
 void __ubsan_handle_divrem_overflow(struct overflow_data *data,
-                               unsigned long lhs,
-                               unsigned long rhs)
+                               void *lhs, void *rhs)
 {
        unsigned long flags;
        char rhs_val_str[VALUE_LENGTH];
@@ -313,6 +312,7 @@ static void handle_object_size_mismatch(struct type_mismatch_data_common *data,
 static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
                                unsigned long ptr)
 {
+       unsigned long flags = user_access_save();
 
        if (!ptr)
                handle_null_ptr_deref(data);
@@ -320,10 +320,12 @@ static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
                handle_misaligned_access(data, ptr);
        else
                handle_object_size_mismatch(data, ptr);
+
+       user_access_restore(flags);
 }
 
 void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
-                               unsigned long ptr)
+                               void *ptr)
 {
        struct type_mismatch_data_common common_data = {
                .location = &data->location,
@@ -332,12 +334,12 @@ void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
                .type_check_kind = data->type_check_kind
        };
 
-       ubsan_type_mismatch_common(&common_data, ptr);
+       ubsan_type_mismatch_common(&common_data, (unsigned long)ptr);
 }
 EXPORT_SYMBOL(__ubsan_handle_type_mismatch);
 
 void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
-                               unsigned long ptr)
+                               void *ptr)
 {
 
        struct type_mismatch_data_common common_data = {
@@ -347,30 +349,11 @@ void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
                .type_check_kind = data->type_check_kind
        };
 
-       ubsan_type_mismatch_common(&common_data, ptr);
+       ubsan_type_mismatch_common(&common_data, (unsigned long)ptr);
 }
 EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
 
-void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data,
-                                       unsigned long bound)
-{
-       unsigned long flags;
-       char bound_str[VALUE_LENGTH];
-
-       if (suppress_report(&data->location))
-               return;
-
-       ubsan_prologue(&data->location, &flags);
-
-       val_to_string(bound_str, sizeof(bound_str), data->type, bound);
-       pr_err("variable length array bound value %s <= 0\n", bound_str);
-
-       ubsan_epilogue(&flags);
-}
-EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive);
-
-void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data,
-                               unsigned long index)
+void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
 {
        unsigned long flags;
        char index_str[VALUE_LENGTH];
@@ -388,7 +371,7 @@ void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data,
 EXPORT_SYMBOL(__ubsan_handle_out_of_bounds);
 
 void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
-                                       unsigned long lhs, unsigned long rhs)
+                                       void *lhs, void *rhs)
 {
        unsigned long flags;
        struct type_descriptor *rhs_type = data->rhs_type;
@@ -439,7 +422,7 @@ void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
 EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
 
 void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
-                               unsigned long val)
+                               void *val)
 {
        unsigned long flags;
        char val_str[VALUE_LENGTH];
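
The switch from unsigned long to void * reflects how values reach these handlers: an integer that fits in a pointer is passed inline in the pointer itself, anything wider by reference. A simplified sketch of the decode, loosely mirroring get_signed_val()/get_unsigned_val() above:

	static u_max decode_val(struct type_descriptor *type, void *val)
	{
		if (is_inline_int(type))
			return (unsigned long)val;	/* value travels in the pointer */
		return *(u_max *)val;			/* value travels behind it */
	}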
index f4d8d0bd4016f42d7c9c50b66d0250367e8dd555..b8fa83864467ffa00e6b3d544440eb17a02e5a0f 100644 (file)
@@ -57,11 +57,6 @@ struct nonnull_arg_data {
        int arg_index;
 };
 
-struct vla_bound_data {
-       struct source_location location;
-       struct type_descriptor *type;
-};
-
 struct out_of_bounds_data {
        struct source_location location;
        struct type_descriptor *array_type;
index f171a83707ced436bb2bd4508060a6cd45a95905..3319e0872d014628a6e505fc80d9daeb8d8a2b47 100644 (file)
@@ -242,6 +242,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
                                                        bool check_target)
 {
        struct page *page = pfn_to_online_page(pfn);
+       struct page *block_page;
        struct page *end_page;
        unsigned long block_pfn;
 
@@ -267,20 +268,26 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
            get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
                return false;
 
+       /* Ensure the start of the pageblock or zone is online and valid */
+       block_pfn = pageblock_start_pfn(pfn);
+       block_page = pfn_to_online_page(max(block_pfn, zone->zone_start_pfn));
+       if (block_page) {
+               page = block_page;
+               pfn = block_pfn;
+       }
+
+       /* Ensure the end of the pageblock or zone is online and valid */
+       block_pfn += pageblock_nr_pages;
+       block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
+       end_page = pfn_to_online_page(block_pfn);
+       if (!end_page)
+               return false;
+
        /*
         * Only clear the hint if a sample indicates there is either a
         * free page or an LRU page in the block. One or other condition
         * is necessary for the block to be a migration source/target.
         */
-       block_pfn = pageblock_start_pfn(pfn);
-       pfn = max(block_pfn, zone->zone_start_pfn);
-       page = pfn_to_page(pfn);
-       if (zone != page_zone(page))
-               return false;
-       pfn = block_pfn + pageblock_nr_pages;
-       pfn = min(pfn, zone_end_pfn(zone));
-       end_page = pfn_to_page(pfn);
-
        do {
                if (pfn_valid_within(pfn)) {
                        if (check_source && PageLRU(page)) {
@@ -309,7 +316,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
 static void __reset_isolation_suitable(struct zone *zone)
 {
        unsigned long migrate_pfn = zone->zone_start_pfn;
-       unsigned long free_pfn = zone_end_pfn(zone);
+       unsigned long free_pfn = zone_end_pfn(zone) - 1;
        unsigned long reset_migrate = free_pfn;
        unsigned long reset_free = migrate_pfn;
        bool source_set = false;
@@ -1363,7 +1370,7 @@ fast_isolate_freepages(struct compact_control *cc)
                                count_compact_events(COMPACTISOLATED, nr_isolated);
                        } else {
                                /* If isolation fails, abort the search */
-                               order = -1;
+                               order = cc->search_order + 1;
                                page = NULL;
                        }
                }
index c0b31b6c38773f37177d1a90a9e5ddd96653449c..eee9c221280c07c22eec9c33845ec2edf003faf1 100644 (file)
@@ -79,7 +79,7 @@ void __dump_page(struct page *page, const char *reason)
                pr_warn("ksm ");
        else if (mapping) {
                pr_warn("%ps ", mapping->a_ops);
-               if (mapping->host->i_dentry.first) {
+               if (mapping->host && mapping->host->i_dentry.first) {
                        struct dentry *dentry;
                        dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias);
                        pr_warn("name:\"%pd\" ", dentry);
@@ -168,7 +168,7 @@ void dump_mm(const struct mm_struct *mm)
                mm_pgtables_bytes(mm),
                mm->map_count,
                mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
-               atomic64_read(&mm->pinned_vm),
+               (u64)atomic64_read(&mm->pinned_vm),
                mm->data_vm, mm->exec_vm, mm->stack_vm,
                mm->start_code, mm->end_code, mm->start_data, mm->end_data,
                mm->start_brk, mm->brk, mm->start_stack,
index f84e22685aaaaa7ff1167697af36a16960171a7d..91819b8ad9cc511ca15a3d84ff81131cd4e2d0da 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -160,8 +160,12 @@ retry:
                goto retry;
        }
 
-       if (flags & FOLL_GET)
-               get_page(page);
+       if (flags & FOLL_GET) {
+               if (unlikely(!try_get_page(page))) {
+                       page = ERR_PTR(-ENOMEM);
+                       goto out;
+               }
+       }
        if (flags & FOLL_TOUCH) {
                if ((flags & FOLL_WRITE) &&
                    !pte_dirty(pte) && !PageDirty(page))
@@ -298,7 +302,10 @@ retry_locked:
                        if (pmd_trans_unstable(pmd))
                                ret = -EBUSY;
                } else {
-                       get_page(page);
+                       if (unlikely(!try_get_page(page))) {
+                               spin_unlock(ptl);
+                               return ERR_PTR(-ENOMEM);
+                       }
                        spin_unlock(ptl);
                        lock_page(page);
                        ret = split_huge_page(page);
@@ -500,7 +507,10 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
                if (is_device_public_page(*page))
                        goto unmap;
        }
-       get_page(*page);
+       if (unlikely(!try_get_page(*page))) {
+               ret = -ENOMEM;
+               goto unmap;
+       }
 out:
        ret = 0;
 unmap:
@@ -1545,6 +1555,20 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
        }
 }
 
+/*
+ * Return the compound head page with ref appropriately incremented,
+ * or NULL if that failed.
+ */
+static inline struct page *try_get_compound_head(struct page *page, int refs)
+{
+       struct page *head = compound_head(page);
+       if (WARN_ON_ONCE(page_ref_count(head) < 0))
+               return NULL;
+       if (unlikely(!page_cache_add_speculative(head, refs)))
+               return NULL;
+       return head;
+}
+
 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
                         int write, struct page **pages, int *nr)
@@ -1579,9 +1603,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
                page = pte_page(pte);
-               head = compound_head(page);
 
-               if (!page_cache_get_speculative(head))
+               head = try_get_compound_head(page, 1);
+               if (!head)
                        goto pte_unmap;
 
                if (unlikely(pte_val(pte) != pte_val(*ptep))) {
@@ -1720,8 +1744,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
 
-       head = compound_head(pmd_page(orig));
-       if (!page_cache_add_speculative(head, refs)) {
+       head = try_get_compound_head(pmd_page(orig), refs);
+       if (!head) {
                *nr -= refs;
                return 0;
        }
@@ -1758,8 +1782,8 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
 
-       head = compound_head(pud_page(orig));
-       if (!page_cache_add_speculative(head, refs)) {
+       head = try_get_compound_head(pud_page(orig), refs);
+       if (!head) {
                *nr -= refs;
                return 0;
        }
@@ -1795,8 +1819,8 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
                refs++;
        } while (addr += PAGE_SIZE, addr != end);
 
-       head = compound_head(pgd_page(orig));
-       if (!page_cache_add_speculative(head, refs)) {
+       head = try_get_compound_head(pgd_page(orig), refs);
+       if (!head) {
                *nr -= refs;
                return 0;
        }
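
The pattern is uniform across the get_user_pages() paths: never bump a page reference blindly where userspace can drive the refcount toward saturation. At a hypothetical call site:

	if (unlikely(!try_get_page(page)))
		return ERR_PTR(-ENOMEM);	/* refcount saturated or zero */
	/* ... use page, then put_page(page) when done ... */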
index 404acdcd0455d0d3dda191d994dfb27d0359104e..b6a34b32d8ac96caaea90d5a68b4d090bbc1d85a 100644 (file)
@@ -755,6 +755,21 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
        spinlock_t *ptl;
 
        ptl = pmd_lock(mm, pmd);
+       if (!pmd_none(*pmd)) {
+               if (write) {
+                       if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
+                               WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
+                               goto out_unlock;
+                       }
+                       entry = pmd_mkyoung(*pmd);
+                       entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+                       if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
+                               update_mmu_cache_pmd(vma, addr, pmd);
+               }
+
+               goto out_unlock;
+       }
+
        entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
        if (pfn_t_devmap(pfn))
                entry = pmd_mkdevmap(entry);
@@ -766,11 +781,16 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
        if (pgtable) {
                pgtable_trans_huge_deposit(mm, pmd, pgtable);
                mm_inc_nr_ptes(mm);
+               pgtable = NULL;
        }
 
        set_pmd_at(mm, addr, pmd, entry);
        update_mmu_cache_pmd(vma, addr, pmd);
+
+out_unlock:
        spin_unlock(ptl);
+       if (pgtable)
+               pte_free(mm, pgtable);
 }
 
 vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -821,6 +841,20 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
        spinlock_t *ptl;
 
        ptl = pud_lock(mm, pud);
+       if (!pud_none(*pud)) {
+               if (write) {
+                       if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
+                               WARN_ON_ONCE(!is_huge_zero_pud(*pud));
+                               goto out_unlock;
+                       }
+                       entry = pud_mkyoung(*pud);
+                       entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
+                       if (pudp_set_access_flags(vma, addr, pud, entry, 1))
+                               update_mmu_cache_pud(vma, addr, pud);
+               }
+               goto out_unlock;
+       }
+
        entry = pud_mkhuge(pfn_t_pud(pfn, prot));
        if (pfn_t_devmap(pfn))
                entry = pud_mkdevmap(entry);
@@ -830,6 +864,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
        }
        set_pud_at(mm, addr, pud, entry);
        update_mmu_cache_pud(vma, addr, pud);
+
+out_unlock:
        spin_unlock(ptl);
 }
 
@@ -1641,7 +1677,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        struct mm_struct *mm = tlb->mm;
        bool ret = false;
 
-       tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);
+       tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
 
        ptl = pmd_trans_huge_lock(pmd, vma);
        if (!ptl)
@@ -1717,7 +1753,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        pmd_t orig_pmd;
        spinlock_t *ptl;
 
-       tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);
+       tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
 
        ptl = __pmd_trans_huge_lock(pmd, vma);
        if (!ptl)
index 97b1e0290c66d48737cda50ccea6bbcc1782c8fc..641cedfc8c0fd0c3d81311ac1bb7abb936e970b3 100644 (file)
@@ -3353,7 +3353,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
         * This is a hugetlb vma, all the pte entries should point
         * to huge page.
         */
-       tlb_remove_check_page_size_change(tlb, sz);
+       tlb_change_page_size(tlb, sz);
        tlb_start_vma(tlb, vma);
 
        /*
@@ -4299,6 +4299,19 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
                pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
                page = pte_page(huge_ptep_get(pte));
+
+               /*
+                * Instead of doing 'try_get_page()' below in the same_page
+                * loop, just check the count once here.
+                */
+               if (unlikely(page_count(page) <= 0)) {
+                       if (pages) {
+                               spin_unlock(ptl);
+                               remainder = 0;
+                               err = -ENOMEM;
+                               break;
+                       }
+               }
 same_page:
                if (pages) {
                        pages[i] = mem_map_offset(page, pfn_offset);
index 5d1065efbd4769151a5ea5f3540f94d2dad7b63c..613dfe681e9fcd0b18907fc0dc3ddb55a0039eda 100644 (file)
@@ -2,11 +2,13 @@
 KASAN_SANITIZE := n
 UBSAN_SANITIZE_common.o := n
 UBSAN_SANITIZE_generic.o := n
+UBSAN_SANITIZE_generic_report.o := n
 UBSAN_SANITIZE_tags.o := n
 KCOV_INSTRUMENT := n
 
 CFLAGS_REMOVE_common.o = -pg
 CFLAGS_REMOVE_generic.o = -pg
+CFLAGS_REMOVE_generic_report.o = -pg
 CFLAGS_REMOVE_tags.o = -pg
 
 # Function splitter causes unnecessary splits in __asan_load1/__asan_store1
@@ -14,6 +16,7 @@ CFLAGS_REMOVE_tags.o = -pg
 
 CFLAGS_common.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 CFLAGS_generic.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
+CFLAGS_generic_report.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 CFLAGS_tags.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 
 obj-$(CONFIG_KASAN) := common.o init.o report.o
index 80bbe62b16cd2427d2e3819478188d88804b4b0a..36afcf64e016fa7ef39e3c4404f6b3c89a60917f 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 #include <linux/bug.h>
+#include <linux/uaccess.h>
 
 #include "kasan.h"
 #include "../slab.h"
@@ -48,37 +49,28 @@ static inline int in_irqentry_text(unsigned long ptr)
                 ptr < (unsigned long)&__softirqentry_text_end);
 }
 
-static inline void filter_irq_stacks(struct stack_trace *trace)
+static inline unsigned int filter_irq_stacks(unsigned long *entries,
+                                            unsigned int nr_entries)
 {
-       int i;
+       unsigned int i;
 
-       if (!trace->nr_entries)
-               return;
-       for (i = 0; i < trace->nr_entries; i++)
-               if (in_irqentry_text(trace->entries[i])) {
+       for (i = 0; i < nr_entries; i++) {
+               if (in_irqentry_text(entries[i])) {
                        /* Include the irqentry function into the stack. */
-                       trace->nr_entries = i + 1;
-                       break;
+                       return i + 1;
                }
+       }
+       return nr_entries;
 }
 
 static inline depot_stack_handle_t save_stack(gfp_t flags)
 {
        unsigned long entries[KASAN_STACK_DEPTH];
-       struct stack_trace trace = {
-               .nr_entries = 0,
-               .entries = entries,
-               .max_entries = KASAN_STACK_DEPTH,
-               .skip = 0
-       };
+       unsigned int nr_entries;
 
-       save_stack_trace(&trace);
-       filter_irq_stacks(&trace);
-       if (trace.nr_entries != 0 &&
-           trace.entries[trace.nr_entries-1] == ULONG_MAX)
-               trace.nr_entries--;
-
-       return depot_save_stack(&trace, flags);
+       nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
+       nr_entries = filter_irq_stacks(entries, nr_entries);
+       return stack_depot_save(entries, nr_entries, flags);
 }
 
 static inline void set_track(struct kasan_track *track, gfp_t flags)
@@ -614,6 +606,15 @@ void kasan_free_shadow(const struct vm_struct *vm)
                vfree(kasan_mem_to_shadow(vm->addr));
 }
 
+extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);
+
+void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
+{
+       unsigned long flags = user_access_save();
+       __kasan_report(addr, size, is_write, ip);
+       user_access_restore(flags);
+}
+
 #ifdef CONFIG_MEMORY_HOTPLUG
 static bool shadow_mapped(unsigned long addr)
 {
index 3e0c11f7d7a1ef4125d355be6096d357efb8d9d9..3ce956efa0cb804cfd964bbc725857b7aed5d7f1 100644 (file)
@@ -163,7 +163,10 @@ static inline u8 random_tag(void)
 #endif
 
 #ifndef arch_kasan_set_tag
-#define arch_kasan_set_tag(addr, tag)  ((void *)(addr))
+static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
+{
+       return addr;
+}
 #endif
 #ifndef arch_kasan_reset_tag
 #define arch_kasan_reset_tag(addr)     ((void *)(addr))
index ca9418fe9232a115d42af35af0a027598a375a13..03a44357938675c84e43d3c5c936a272a7655fde 100644 (file)
@@ -100,10 +100,11 @@ static void print_track(struct kasan_track *track, const char *prefix)
 {
        pr_err("%s by task %u:\n", prefix, track->pid);
        if (track->stack) {
-               struct stack_trace trace;
+               unsigned long *entries;
+               unsigned int nr_entries;
 
-               depot_fetch_stack(track->stack, &trace);
-               print_stack_trace(&trace, 0);
+               nr_entries = stack_depot_fetch(track->stack, &entries);
+               stack_trace_print(entries, nr_entries, 0);
        } else {
                pr_err("(stack is not available)\n");
        }
@@ -281,8 +282,7 @@ void kasan_report_invalid_free(void *object, unsigned long ip)
        end_report(&flags);
 }
 
-void kasan_report(unsigned long addr, size_t size,
-               bool is_write, unsigned long ip)
+void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
 {
        struct kasan_access_info info;
        void *tagged_addr;
index 707fa5579f66f1e1e96a5613e50ff74b92417954..e57bf810f7983ac20663b92046250cb1f6bf1b53 100644 (file)
@@ -410,11 +410,6 @@ static void print_unreferenced(struct seq_file *seq,
  */
 static void dump_object_info(struct kmemleak_object *object)
 {
-       struct stack_trace trace;
-
-       trace.nr_entries = object->trace_len;
-       trace.entries = object->trace;
-
        pr_notice("Object 0x%08lx (size %zu):\n",
                  object->pointer, object->size);
        pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
@@ -424,7 +419,7 @@ static void dump_object_info(struct kmemleak_object *object)
        pr_notice("  flags = 0x%x\n", object->flags);
        pr_notice("  checksum = %u\n", object->checksum);
        pr_notice("  backtrace:\n");
-       print_stack_trace(&trace, 4);
+       stack_trace_print(object->trace, object->trace_len, 4);
 }
 
 /*
@@ -553,15 +548,7 @@ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int ali
  */
 static int __save_stack_trace(unsigned long *trace)
 {
-       struct stack_trace stack_trace;
-
-       stack_trace.max_entries = MAX_TRACE;
-       stack_trace.nr_entries = 0;
-       stack_trace.entries = trace;
-       stack_trace.skip = 2;
-       save_stack_trace(&stack_trace);
-
-       return stack_trace.nr_entries;
+       return stack_trace_save(trace, MAX_TRACE, 2);
 }
 
 /*
@@ -1401,6 +1388,7 @@ static void scan_block(void *_start, void *_end,
 /*
  * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
  */
+#ifdef CONFIG_SMP
 static void scan_large_block(void *start, void *end)
 {
        void *next;
@@ -1412,6 +1400,7 @@ static void scan_large_block(void *start, void *end)
                cond_resched();
        }
 }
+#endif
 
 /*
  * Scan a memory block corresponding to a kmemleak_object. A condition is
@@ -1529,11 +1518,6 @@ static void kmemleak_scan(void)
        }
        rcu_read_unlock();
 
-       /* data/bss scanning */
-       scan_large_block(_sdata, _edata);
-       scan_large_block(__bss_start, __bss_stop);
-       scan_large_block(__start_ro_after_init, __end_ro_after_init);
-
 #ifdef CONFIG_SMP
        /* per-cpu sections scanning */
        for_each_possible_cpu(i)
@@ -2024,13 +2008,8 @@ early_param("kmemleak", kmemleak_boot_config);
 
 static void __init print_log_trace(struct early_log *log)
 {
-       struct stack_trace trace;
-
-       trace.nr_entries = log->trace_len;
-       trace.entries = log->trace;
-
        pr_notice("Early log backtrace:\n");
-       print_stack_trace(&trace, 2);
+       stack_trace_print(log->trace, log->trace_len, 2);
 }
 
 /*
@@ -2071,6 +2050,17 @@ void __init kmemleak_init(void)
        }
        local_irq_restore(flags);
 
+       /* register the data/bss sections */
+       create_object((unsigned long)_sdata, _edata - _sdata,
+                     KMEMLEAK_GREY, GFP_ATOMIC);
+       create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
+                     KMEMLEAK_GREY, GFP_ATOMIC);
+       /* only register .data..ro_after_init if not within .data */
+       if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
+               create_object((unsigned long)__start_ro_after_init,
+                             __end_ro_after_init - __start_ro_after_init,
+                             KMEMLEAK_GREY, GFP_ATOMIC);
+
        /*
         * This is the point where tracking allocations is safe. Automatic
         * scanning is started during the late initcall. Add the early logged
index 21a7881a2db41e994c977ae49bddcf6e2ee2ffd8..bb3a4554d5d56f810f92064b268e8f0e097a9d6b 100644 (file)
@@ -328,7 +328,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
        if (pmd_trans_unstable(pmd))
                return 0;
 
-       tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
+       tlb_change_page_size(tlb, PAGE_SIZE);
        orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        flush_tlb_batched_pending(mm);
        arch_enter_lazy_mmu_mode();
index 532e0e2a4817e36d9106634c85a95136790f5227..81a0d3914ec999efcb36fb590e75c29d059d2b24 100644 (file)
@@ -3882,6 +3882,22 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
        return &memcg->cgwb_domain;
 }
 
+/*
+ * idx can be of type enum memcg_stat_item or node_stat_item.
+ * Keep in sync with memcg_exact_page().
+ */
+static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
+{
+       long x = atomic_long_read(&memcg->stat[idx]);
+       int cpu;
+
+       for_each_online_cpu(cpu)
+               x += per_cpu_ptr(memcg->stat_cpu, cpu)->count[idx];
+       if (x < 0)
+               x = 0;
+       return x;
+}
+
 /**
  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
  * @wb: bdi_writeback in question
@@ -3907,10 +3923,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
        struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
        struct mem_cgroup *parent;
 
-       *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
+       *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
 
        /* this should eventually include NR_UNSTABLE_NFS */
-       *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
+       *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
        *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
                                                     (1 << LRU_ACTIVE_FILE));
        *pheadroom = PAGE_COUNTER_MAX;
index 47fe250307c7aa0f553454af6f459343819f8770..36aac68446627540f2186985f58ab13ab5fb3868 100644 (file)
@@ -356,7 +356,7 @@ void free_pgd_range(struct mmu_gather *tlb,
         * We add page table cache pages with PAGE_SIZE,
         * (see pte_free_tlb()), flush the tlb if we need
         */
-       tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
+       tlb_change_page_size(tlb, PAGE_SIZE);
        pgd = pgd_offset(tlb->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
@@ -1046,7 +1046,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
        pte_t *pte;
        swp_entry_t entry;
 
-       tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
+       tlb_change_page_size(tlb, PAGE_SIZE);
 again:
        init_rss_vec(rss);
        start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
@@ -1155,7 +1155,7 @@ again:
         */
        if (force_flush) {
                force_flush = 0;
-               tlb_flush_mmu_free(tlb);
+               tlb_flush_mmu(tlb);
                if (addr != end)
                        goto again;
        }
@@ -1549,10 +1549,12 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                                WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
                                goto out_unlock;
                        }
-                       entry = *pte;
-                       goto out_mkwrite;
-               } else
-                       goto out_unlock;
+                       entry = pte_mkyoung(*pte);
+                       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+                       if (ptep_set_access_flags(vma, addr, pte, entry, 1))
+                               update_mmu_cache(vma, addr, pte);
+               }
+               goto out_unlock;
        }
 
        /* Ok, finally just insert the thing.. */
@@ -1561,7 +1563,6 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
        else
                entry = pte_mkspecial(pfn_t_pte(pfn, prot));
 
-out_mkwrite:
        if (mkwrite) {
                entry = pte_mkyoung(entry);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
index f767582af4f8c0f28102f2d77d8dc6a667ec3df5..b236069ff0d823ce92a84222494d42bdfa97c20c 100644 (file)
@@ -874,6 +874,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
         */
        mem = find_memory_block(__pfn_to_section(pfn));
        nid = mem->nid;
+       put_device(&mem->dev);
 
        /* associate pfn range with the zone */
        zone = move_pfn_range(online_type, nid, pfn, nr_pages);
@@ -1576,7 +1577,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
 {
        unsigned long pfn, nr_pages;
        long offlined_pages;
-       int ret, node;
+       int ret, node, nr_isolate_pageblock;
        unsigned long flags;
        unsigned long valid_start, valid_end;
        struct zone *zone;
@@ -1602,10 +1603,11 @@ static int __ref __offline_pages(unsigned long start_pfn,
        ret = start_isolate_page_range(start_pfn, end_pfn,
                                       MIGRATE_MOVABLE,
                                       SKIP_HWPOISON | REPORT_FAILURE);
-       if (ret) {
+       if (ret < 0) {
                reason = "failure to isolate range";
                goto failed_removal;
        }
+       nr_isolate_pageblock = ret;
 
        arg.start_pfn = start_pfn;
        arg.nr_pages = nr_pages;
@@ -1657,8 +1659,16 @@ static int __ref __offline_pages(unsigned long start_pfn,
        /* Ok, all of our target is isolated.
           We cannot do rollback at this point. */
        offline_isolated_pages(start_pfn, end_pfn);
-       /* reset pagetype flags and makes migrate type to be MOVABLE */
-       undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+
+       /*
+        * Onlining will reset the pagetype flags and make the migrate
+        * type MOVABLE, so we only need to decrease the zone's count of
+        * isolated pageblocks here.
+        */
+       spin_lock_irqsave(&zone->lock, flags);
+       zone->nr_isolate_pageblock -= nr_isolate_pageblock;
+       spin_unlock_irqrestore(&zone->lock, flags);
+
        /* removal success */
        adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
        zone->present_pages -= offlined_pages;
@@ -1690,12 +1700,12 @@ static int __ref __offline_pages(unsigned long start_pfn,
 
 failed_removal_isolated:
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+       memory_notify(MEM_CANCEL_OFFLINE, &arg);
 failed_removal:
        pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
                 (unsigned long long) start_pfn << PAGE_SHIFT,
                 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
                 reason);
-       memory_notify(MEM_CANCEL_OFFLINE, &arg);
        /* pushback to free area */
        mem_hotplug_done();
        return ret;
index af171ccb56a29713a326b1018e38215700ffcfe5..2219e747df494e5799d5e1af97d6b456907bfa43 100644 (file)
@@ -428,6 +428,13 @@ static inline bool queue_pages_required(struct page *page,
        return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
 }
 
+/*
+ * queue_pages_pmd() has three possible return values:
+ * 1 - pages are placed on the right node or queued successfully.
+ * 0 - THP was split.
+ * -EIO - the PMD is a migration entry, or MPOL_MF_STRICT was specified
+ *        and an existing page was already on a node that does not
+ *        follow the policy.
+ */
 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
 {
@@ -437,7 +444,7 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
        unsigned long flags;
 
        if (unlikely(is_pmd_migration_entry(*pmd))) {
-               ret = 1;
+               ret = -EIO;
                goto unlock;
        }
        page = pmd_page(*pmd);
@@ -454,8 +461,15 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
        ret = 1;
        flags = qp->flags;
        /* go to thp migration */
-       if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+       if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+               if (!vma_migratable(walk->vma)) {
+                       ret = -EIO;
+                       goto unlock;
+               }
+
                migrate_page_add(page, qp->pagelist, flags);
+       } else
+               ret = -EIO;
 unlock:
        spin_unlock(ptl);
 out:
@@ -480,8 +494,10 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
-               if (ret)
+               if (ret > 0)
                        return 0;
+               else if (ret < 0)
+                       return ret;
        }
 
        if (pmd_trans_unstable(pmd))
@@ -502,11 +518,16 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
                        continue;
                if (!queue_pages_required(page, qp))
                        continue;
-               migrate_page_add(page, qp->pagelist, flags);
+               if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+                       if (!vma_migratable(vma))
+                               break;
+                       migrate_page_add(page, qp->pagelist, flags);
+               } else
+                       break;
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
-       return 0;
+       return addr != end ? -EIO : 0;
 }
 
 static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
@@ -576,7 +597,12 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
        unsigned long endvma = vma->vm_end;
        unsigned long flags = qp->flags;
 
-       if (!vma_migratable(vma))
+       /*
+        * Need to check MPOL_MF_STRICT so that -EIO can be returned if
+        * necessary, regardless of vma_migratable().
+        */
+       if (!vma_migratable(vma) &&
+           !(flags & MPOL_MF_STRICT))
                return 1;
 
        if (endvma > end)
@@ -603,7 +629,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
        }
 
        /* queue pages from current vma */
-       if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+       if (flags & MPOL_MF_VALID)
                return 0;
        return 1;
 }
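
The three-way contract documented above is exactly what the queue_pages_pte_range() hunk dispatches on. As a reading aid, here is a minimal sketch of a caller honoring that contract (a hypothetical walker callback, not the kernel's actual code):

/*
 * Sketch: dispatching on queue_pages_pmd()'s return values.
 *  > 0  THP handled (queued or already placed), skip the PTE walk.
 * == 0  THP was split, fall through to the PTE loop.
 *  < 0  hard error (-EIO), abort the walk.
 */
static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	spinlock_t *ptl = pmd_trans_huge_lock(pmd, walk->vma);

	if (ptl) {
		int ret = queue_pages_pmd(pmd, ptl, addr, end, walk);

		if (ret > 0)
			return 0;	/* huge page fully handled */
		if (ret < 0)
			return ret;	/* propagate -EIO */
		/* ret == 0: split happened, continue with the PTEs */
	}
	/* ... per-PTE queueing would follow here ... */
	return 0;
}
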
index ac6f4939bb5975a2cab6e78419562c1529e12219..663a5449367a4204e937491d2d9032b0a3768bdf 100644 (file)
@@ -248,10 +248,8 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
                                pte = swp_entry_to_pte(entry);
                        } else if (is_device_public_page(new)) {
                                pte = pte_mkdevmap(pte);
-                               flush_dcache_page(new);
                        }
-               } else
-                       flush_dcache_page(new);
+               }
 
 #ifdef CONFIG_HUGETLB_PAGE
                if (PageHuge(new)) {
@@ -995,6 +993,13 @@ static int move_to_new_page(struct page *newpage, struct page *page,
                 */
                if (!PageMappingFlags(page))
                        page->mapping = NULL;
+
+               if (unlikely(is_zone_device_page(newpage))) {
+                       if (is_device_public_page(newpage))
+                               flush_dcache_page(newpage);
+               } else
+                       flush_dcache_page(newpage);
+
        }
 out:
        return rc;
index 41eb48d9b5276733e48b95f1addfcb228becd993..bd7b9f293b391f22b85810e48bc7c0679b217f05 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -45,6 +45,7 @@
 #include <linux/moduleparam.h>
 #include <linux/pkeys.h>
 #include <linux/oom.h>
+#include <linux/sched/mm.h>
 
 #include <linux/uaccess.h>
 #include <asm/cacheflush.h>
@@ -2525,7 +2526,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
        vma = find_vma_prev(mm, addr, &prev);
        if (vma && (vma->vm_start <= addr))
                return vma;
-       if (!prev || expand_stack(prev, addr))
+       /* don't alter vm_end if the coredump is running */
+       if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
                return NULL;
        if (prev->vm_flags & VM_LOCKED)
                populate_vma_page_range(prev, addr, prev->vm_end, NULL);
@@ -2551,6 +2553,9 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
                return vma;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                return NULL;
+       /* don't alter vm_start if the coredump is running */
+       if (!mmget_still_valid(mm))
+               return NULL;
        start = vma->vm_start;
        if (expand_stack(vma, addr))
                return NULL;
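
mmget_still_valid() goes false once the coredump has started snapshotting the mm, so any path that would move vm_start or vm_end must bail out first. A minimal sketch of the guard, assuming only <linux/sched/mm.h> (try_grow() is a hypothetical helper):

#include <linux/mm.h>
#include <linux/sched/mm.h>

/* Sketch: refuse to resize a stack VMA under a possible coredump. */
static struct vm_area_struct *try_grow(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       unsigned long addr)
{
	if (!mmget_still_valid(mm))	/* coredump in progress */
		return NULL;
	if (expand_stack(vma, addr))
		return NULL;
	return vma;
}
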
index f2f03c65580707669207f2b3ae17ce8227c2b43f..99740e1dd27304b31097eb3deedc309a6c398490 100644 (file)
@@ -11,7 +11,7 @@
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 
-#ifdef HAVE_GENERIC_MMU_GATHER
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
 
 static bool tlb_next_batch(struct mmu_gather *tlb)
 {
@@ -41,35 +41,10 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
        return true;
 }
 
-void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-                               unsigned long start, unsigned long end)
-{
-       tlb->mm = mm;
-
-       /* Is it from 0 to ~0? */
-       tlb->fullmm     = !(start | (end+1));
-       tlb->need_flush_all = 0;
-       tlb->local.next = NULL;
-       tlb->local.nr   = 0;
-       tlb->local.max  = ARRAY_SIZE(tlb->__pages);
-       tlb->active     = &tlb->local;
-       tlb->batch_count = 0;
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-       tlb->batch = NULL;
-#endif
-       tlb->page_size = 0;
-
-       __tlb_reset_range(tlb);
-}
-
-void tlb_flush_mmu_free(struct mmu_gather *tlb)
+static void tlb_batch_pages_flush(struct mmu_gather *tlb)
 {
        struct mmu_gather_batch *batch;
 
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-       tlb_table_flush(tlb);
-#endif
        for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
                free_pages_and_swap_cache(batch->pages, batch->nr);
                batch->nr = 0;
@@ -77,31 +52,10 @@ void tlb_flush_mmu_free(struct mmu_gather *tlb)
        tlb->active = &tlb->local;
 }
 
-void tlb_flush_mmu(struct mmu_gather *tlb)
-{
-       tlb_flush_mmu_tlbonly(tlb);
-       tlb_flush_mmu_free(tlb);
-}
-
-/* tlb_finish_mmu
- *     Called at the end of the shootdown operation to free up any resources
- *     that were required.
- */
-void arch_tlb_finish_mmu(struct mmu_gather *tlb,
-               unsigned long start, unsigned long end, bool force)
+static void tlb_batch_list_free(struct mmu_gather *tlb)
 {
        struct mmu_gather_batch *batch, *next;
 
-       if (force) {
-               __tlb_reset_range(tlb);
-               __tlb_adjust_range(tlb, start, end - start);
-       }
-
-       tlb_flush_mmu(tlb);
-
-       /* keep the page table cache within bounds */
-       check_pgt_cache();
-
        for (batch = tlb->local.next; batch; batch = next) {
                next = batch->next;
                free_pages((unsigned long)batch, 0);
@@ -109,19 +63,15 @@ void arch_tlb_finish_mmu(struct mmu_gather *tlb,
        tlb->local.next = NULL;
 }
 
-/* __tlb_remove_page
- *     Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
- *     handling the additional races in SMP caused by other CPUs caching valid
- *     mappings in their TLBs. Returns the number of free page slots left.
- *     When out of page slots we must call tlb_flush_mmu().
- *returns true if the caller should flush.
- */
 bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
 {
        struct mmu_gather_batch *batch;
 
        VM_BUG_ON(!tlb->end);
+
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
        VM_WARN_ON(tlb->page_size != page_size);
+#endif
 
        batch = tlb->active;
        /*
@@ -139,7 +89,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
        return false;
 }
 
-#endif /* HAVE_GENERIC_MMU_GATHER */
+#endif /* HAVE_MMU_GATHER_NO_GATHER */
 
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 
@@ -152,7 +102,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
  */
 static inline void tlb_table_invalidate(struct mmu_gather *tlb)
 {
-#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
+#ifndef CONFIG_HAVE_RCU_TABLE_NO_INVALIDATE
        /*
         * Invalidate page-table caches used by hardware walkers. Then we still
         * need to RCU-sched wait while freeing the pages because software
@@ -193,7 +143,7 @@ static void tlb_remove_table_rcu(struct rcu_head *head)
        free_page((unsigned long)batch);
 }
 
-void tlb_table_flush(struct mmu_gather *tlb)
+static void tlb_table_flush(struct mmu_gather *tlb)
 {
        struct mmu_table_batch **batch = &tlb->batch;
 
@@ -225,6 +175,22 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 
 #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
 
+static void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+       tlb_table_flush(tlb);
+#endif
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+       tlb_batch_pages_flush(tlb);
+#endif
+}
+
+void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+       tlb_flush_mmu_tlbonly(tlb);
+       tlb_flush_mmu_free(tlb);
+}
+
 /**
  * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
  * @tlb: the mmu_gather structure to initialize
@@ -240,10 +206,40 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                        unsigned long start, unsigned long end)
 {
-       arch_tlb_gather_mmu(tlb, mm, start, end);
+       tlb->mm = mm;
+
+       /* Is it from 0 to ~0? */
+       tlb->fullmm     = !(start | (end+1));
+
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+       tlb->need_flush_all = 0;
+       tlb->local.next = NULL;
+       tlb->local.nr   = 0;
+       tlb->local.max  = ARRAY_SIZE(tlb->__pages);
+       tlb->active     = &tlb->local;
+       tlb->batch_count = 0;
+#endif
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+       tlb->batch = NULL;
+#endif
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+       tlb->page_size = 0;
+#endif
+
+       __tlb_reset_range(tlb);
        inc_tlb_flush_pending(tlb->mm);
 }
 
+/**
+ * tlb_finish_mmu - finish an mmu_gather structure
+ * @tlb: the mmu_gather structure to finish
+ * @start: start of the region that will be removed from the page-table
+ * @end: end of the region that will be removed from the page-table
+ *
+ * Called at the end of the shootdown operation to free up any resources that
+ * were required.
+ */
 void tlb_finish_mmu(struct mmu_gather *tlb,
                unsigned long start, unsigned long end)
 {
@@ -254,8 +250,17 @@ void tlb_finish_mmu(struct mmu_gather *tlb,
         * the TLB by observing pte_none|!pte_dirty, for example so flush TLB
         * forcefully if we detect parallel PTE batching threads.
         */
-       bool force = mm_tlb_flush_nested(tlb->mm);
+       if (mm_tlb_flush_nested(tlb->mm)) {
+               __tlb_reset_range(tlb);
+               __tlb_adjust_range(tlb, start, end - start);
+       }
 
-       arch_tlb_finish_mmu(tlb, start, end, force);
+       tlb_flush_mmu(tlb);
+
+       /* keep the page table cache within bounds */
+       check_pgt_cache();
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+       tlb_batch_list_free(tlb);
+#endif
        dec_tlb_flush_pending(tlb->mm);
 }
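
After this consolidation the per-arch arch_tlb_gather_mmu()/arch_tlb_finish_mmu() hooks are gone and the variants are selected via the new Kconfig symbols instead. The caller-visible lifecycle is unchanged, roughly as sketched here:

#include <asm/tlb.h>

/* Sketch: the generic mmu_gather lifecycle after the rework. */
static void example_unmap(struct mm_struct *mm,
			  unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* init, inc pending count */
	/* ... zap PTEs, batching via tlb_remove_page() et al. ... */
	tlb_finish_mmu(&tlb, start, end);	/* flush, free batches, dec */
}
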
index 03fcf73d47dabde0987f3542c3c87fca33bf5a5d..c02cff1ed56eb231fef3a5ffacfd9aa64d8dda54 100644 (file)
@@ -266,7 +266,20 @@ compound_page_dtor * const compound_page_dtors[] = {
 
 int min_free_kbytes = 1024;
 int user_min_free_kbytes = -1;
+#ifdef CONFIG_DISCONTIGMEM
+/*
+ * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
+ * are not on separate NUMA nodes. Functionally this works but with
+ * watermark_boost_factor, it can reclaim prematurely as the ranges can be
+ * quite small. By default, do not boost watermarks on discontigmem as in
+ * many cases very high-order allocations like THP are likely to be
+ * unsupported and the premature reclaim offsets the advantage of long-term
+ * fragmentation avoidance.
+ */
+int watermark_boost_factor __read_mostly;
+#else
 int watermark_boost_factor __read_mostly = 15000;
+#endif
 int watermark_scale_factor = 10;
 
 static unsigned long nr_kernel_pages __initdata;
@@ -3419,8 +3432,11 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
                alloc_flags |= ALLOC_KSWAPD;
 
 #ifdef CONFIG_ZONE_DMA32
+       if (!zone)
+               return alloc_flags;
+
        if (zone_idx(zone) != ZONE_NORMAL)
-               goto out;
+               return alloc_flags;
 
        /*
         * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
@@ -3429,9 +3445,9 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
         */
        BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
        if (nr_online_nodes > 1 && !populated_zone(--zone))
-               goto out;
+               return alloc_flags;
 
-out:
+       alloc_flags |= ALLOC_NOFRAGMENT;
 #endif /* CONFIG_ZONE_DMA32 */
        return alloc_flags;
 }
@@ -3773,11 +3789,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        memalloc_noreclaim_restore(noreclaim_flag);
        psi_memstall_leave(&pflags);
 
-       if (*compact_result <= COMPACT_INACTIVE) {
-               WARN_ON_ONCE(page);
-               return NULL;
-       }
-
        /*
         * At least in one zone compaction wasn't deferred or skipped, so let's
         * count a compaction stall
@@ -8005,7 +8016,10 @@ void *__init alloc_large_system_hash(const char *tablename,
 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                         int migratetype, int flags)
 {
-       unsigned long pfn, iter, found;
+       unsigned long found;
+       unsigned long iter = 0;
+       unsigned long pfn = page_to_pfn(page);
+       const char *reason = "unmovable page";
 
        /*
         * TODO we could make this much more efficient by not checking every
@@ -8015,17 +8029,20 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
         * can still lead to having bootmem allocations in zone_movable.
         */
 
-       /*
-        * CMA allocations (alloc_contig_range) really need to mark isolate
-        * CMA pageblocks even when they are not movable in fact so consider
-        * them movable here.
-        */
-       if (is_migrate_cma(migratetype) &&
-                       is_migrate_cma(get_pageblock_migratetype(page)))
-               return false;
+       if (is_migrate_cma_page(page)) {
+               /*
+                * CMA allocations (alloc_contig_range) really need to mark the
+                * CMA pageblocks isolated, even when they are not movable in
+                * fact, so consider them movable here.
+                */
+               if (is_migrate_cma(migratetype))
+                       return false;
 
-       pfn = page_to_pfn(page);
-       for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
+               reason = "CMA page";
+               goto unmovable;
+       }
+
+       for (found = 0; iter < pageblock_nr_pages; iter++) {
                unsigned long check = pfn + iter;
 
                if (!pfn_valid_within(check))
@@ -8105,7 +8122,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 unmovable:
        WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
        if (flags & REPORT_FAILURE)
-               dump_page(pfn_to_page(pfn+iter), "unmovable page");
+               dump_page(pfn_to_page(pfn + iter), reason);
        return true;
 }
 
@@ -8233,7 +8250,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
        ret = start_isolate_page_range(pfn_max_align_down(start),
                                       pfn_max_align_up(end), migratetype, 0);
-       if (ret)
+       if (ret < 0)
                return ret;
 
        /*
index ce323e56b34d6bc43a9e16cc9054f35290bb0b63..019280712e1b8b7e075b51573b5c56d07aef3922 100644 (file)
@@ -59,7 +59,8 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
         * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
         * We just check MOVABLE pages.
         */
-       if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype, flags))
+       if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
+                                isol_flags))
                ret = 0;
 
        /*
@@ -160,27 +161,36 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
        return NULL;
 }
 
-/*
- * start_isolate_page_range() -- make page-allocation-type of range of pages
- * to be MIGRATE_ISOLATE.
- * @start_pfn: The lower PFN of the range to be isolated.
- * @end_pfn: The upper PFN of the range to be isolated.
- * @migratetype: migrate type to set in error recovery.
+/**
+ * start_isolate_page_range() - make page-allocation-type of range of pages to
+ * be MIGRATE_ISOLATE.
+ * @start_pfn:         The lower PFN of the range to be isolated.
+ * @end_pfn:           The upper PFN of the range to be isolated.
+ *                     start_pfn/end_pfn must be aligned to pageblock_order.
+ * @migratetype:       Migrate type to set in error recovery.
+ * @flags:             The following flags are allowed (they can be combined in
+ *                     a bit mask)
+ *                     SKIP_HWPOISON - ignore hwpoison pages
+ *                     REPORT_FAILURE - report details about the failure to
+ *                     isolate the range
  *
  * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
  * the range will never be allocated. Any free pages and pages freed in the
- * future will not be allocated again.
- *
- * start_pfn/end_pfn must be aligned to pageblock_order.
- * Return 0 on success and -EBUSY if any part of range cannot be isolated.
+ * future will not be allocated again. If the specified range includes
+ * migrate types other than MOVABLE or CMA, this will fail with -EBUSY. To
+ * finally isolate all pages in the range, the caller has to free all pages
+ * in the range; test_pages_isolated() can be used to verify that.
  *
  * There is no high level synchronization mechanism that prevents two threads
- * from trying to isolate overlapping ranges.  If this happens, one thread
+ * from trying to isolate overlapping ranges. If this happens, one thread
  * will notice pageblocks in the overlapping range already set to isolate.
  * This happens in set_migratetype_isolate, and set_migratetype_isolate
- * returns an error.  We then clean up by restoring the migration type on
- * pageblocks we may have modified and return -EBUSY to caller.  This
+ * returns an error. We then clean up by restoring the migration type on
+ * pageblocks we may have modified and return -EBUSY to caller. This
  * prevents two threads from simultaneously working on overlapping ranges.
+ *
+ * Return: the number of isolated pageblocks on success and -EBUSY if any part
+ * of the range cannot be isolated.
  */
 int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned migratetype, int flags)
@@ -188,6 +198,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
        unsigned long pfn;
        unsigned long undo_pfn;
        struct page *page;
+       int nr_isolate_pageblock = 0;
 
        BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
        BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
@@ -196,13 +207,15 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
-               if (page &&
-                   set_migratetype_isolate(page, migratetype, flags)) {
-                       undo_pfn = pfn;
-                       goto undo;
+               if (page) {
+                       if (set_migratetype_isolate(page, migratetype, flags)) {
+                               undo_pfn = pfn;
+                               goto undo;
+                       }
+                       nr_isolate_pageblock++;
                }
        }
-       return 0;
+       return nr_isolate_pageblock;
 undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
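
Callers of start_isolate_page_range() now have to treat any non-negative value as success. A hedged sketch of the consumer side, simplified from the offline-style usage:

/* Sketch: consuming the new start_isolate_page_range() return value. */
static int example_isolate_range(unsigned long start_pfn,
				 unsigned long end_pfn)
{
	int ret = start_isolate_page_range(start_pfn, end_pfn,
					   MIGRATE_MOVABLE, 0);

	if (ret < 0)	/* -EBUSY: some pageblock could not be isolated */
		return ret;

	/* ret pageblocks are now MIGRATE_ISOLATE; do the real work here */

	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	return 0;
}
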
index 925b6f44a444afcddc2a634b8fafb3626eaeb16d..addcbb2ae4e4f7e8efa3bc1df75a2f77bf9a094a 100644 (file)
@@ -58,15 +58,10 @@ static bool need_page_owner(void)
 static __always_inline depot_stack_handle_t create_dummy_stack(void)
 {
        unsigned long entries[4];
-       struct stack_trace dummy;
+       unsigned int nr_entries;
 
-       dummy.nr_entries = 0;
-       dummy.max_entries = ARRAY_SIZE(entries);
-       dummy.entries = &entries[0];
-       dummy.skip = 0;
-
-       save_stack_trace(&dummy);
-       return depot_save_stack(&dummy, GFP_KERNEL);
+       nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
+       return stack_depot_save(entries, nr_entries, GFP_KERNEL);
 }
 
 static noinline void register_dummy_stack(void)
@@ -120,49 +115,39 @@ void __reset_page_owner(struct page *page, unsigned int order)
        }
 }
 
-static inline bool check_recursive_alloc(struct stack_trace *trace,
-                                       unsigned long ip)
+static inline bool check_recursive_alloc(unsigned long *entries,
+                                        unsigned int nr_entries,
+                                        unsigned long ip)
 {
-       int i;
-
-       if (!trace->nr_entries)
-               return false;
+       unsigned int i;
 
-       for (i = 0; i < trace->nr_entries; i++) {
-               if (trace->entries[i] == ip)
+       for (i = 0; i < nr_entries; i++) {
+               if (entries[i] == ip)
                        return true;
        }
-
        return false;
 }
 
 static noinline depot_stack_handle_t save_stack(gfp_t flags)
 {
        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
-       struct stack_trace trace = {
-               .nr_entries = 0,
-               .entries = entries,
-               .max_entries = PAGE_OWNER_STACK_DEPTH,
-               .skip = 2
-       };
        depot_stack_handle_t handle;
+       unsigned int nr_entries;
 
-       save_stack_trace(&trace);
-       if (trace.nr_entries != 0 &&
-           trace.entries[trace.nr_entries-1] == ULONG_MAX)
-               trace.nr_entries--;
+       nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
 
        /*
-        * We need to check recursion here because our request to stackdepot
-        * could trigger memory allocation to save new entry. New memory
-        * allocation would reach here and call depot_save_stack() again
-        * if we don't catch it. There is still not enough memory in stackdepot
-        * so it would try to allocate memory again and loop forever.
+        * We need to check recursion here because our request to
+        * stackdepot could trigger memory allocation to save a new
+        * entry. New memory allocation would reach here and call
+        * stack_depot_save() again if we don't catch it. There is
+        * still not enough memory in stackdepot so it would try to
+        * allocate memory again and loop forever.
         */
-       if (check_recursive_alloc(&trace, _RET_IP_))
+       if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
                return dummy_handle;
 
-       handle = depot_save_stack(&trace, flags);
+       handle = stack_depot_save(entries, nr_entries, flags);
        if (!handle)
                handle = failure_handle;
 
@@ -340,16 +325,10 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
                struct page *page, struct page_owner *page_owner,
                depot_stack_handle_t handle)
 {
-       int ret;
-       int pageblock_mt, page_mt;
+       int ret, pageblock_mt, page_mt;
+       unsigned long *entries;
+       unsigned int nr_entries;
        char *kbuf;
-       unsigned long entries[PAGE_OWNER_STACK_DEPTH];
-       struct stack_trace trace = {
-               .nr_entries = 0,
-               .entries = entries,
-               .max_entries = PAGE_OWNER_STACK_DEPTH,
-               .skip = 0
-       };
 
        count = min_t(size_t, count, PAGE_SIZE);
        kbuf = kmalloc(count, GFP_KERNEL);
@@ -378,8 +357,8 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
        if (ret >= count)
                goto err;
 
-       depot_fetch_stack(handle, &trace);
-       ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
+       nr_entries = stack_depot_fetch(handle, &entries);
+       ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
        if (ret >= count)
                goto err;
 
@@ -410,14 +389,9 @@ void __dump_page_owner(struct page *page)
 {
        struct page_ext *page_ext = lookup_page_ext(page);
        struct page_owner *page_owner;
-       unsigned long entries[PAGE_OWNER_STACK_DEPTH];
-       struct stack_trace trace = {
-               .nr_entries = 0,
-               .entries = entries,
-               .max_entries = PAGE_OWNER_STACK_DEPTH,
-               .skip = 0
-       };
        depot_stack_handle_t handle;
+       unsigned long *entries;
+       unsigned int nr_entries;
        gfp_t gfp_mask;
        int mt;
 
@@ -441,10 +415,10 @@ void __dump_page_owner(struct page *page)
                return;
        }
 
-       depot_fetch_stack(handle, &trace);
+       nr_entries = stack_depot_fetch(handle, &entries);
        pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
                 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
-       print_stack_trace(&trace, 0);
+       stack_trace_print(entries, nr_entries, 0);
 
        if (page_owner->last_migrate_reason != -1)
                pr_alert("page has been migrated, last migrate reason: %s\n",
index 2e6fc8d552c96d58f615be2fd3addadefd01f5c0..68dd2e7e73b5f29b2d3dfd2bd9e4b984244d7dc9 100644 (file)
@@ -2567,8 +2567,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
                ai->groups[group].base_offset = areas[group] - base;
        }
 
-       pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
-               PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
+       pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
+               PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
                ai->dyn_size, ai->unit_size);
 
        rc = pcpu_setup_first_chunk(ai, base);
@@ -2692,8 +2692,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
        }
 
        /* we're ready, commit */
-       pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
-               unit_pages, psize_str, vm.addr, ai->static_size,
+       pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
+               unit_pages, psize_str, ai->static_size,
                ai->reserved_size, ai->dyn_size);
 
        rc = pcpu_setup_first_chunk(ai, vm.addr);
index b3db3779a30a1f1fbe2d5cda71ddc402601926ef..2275a0ff7c3051d9674ee59b153451c6078741c3 100644 (file)
@@ -1081,9 +1081,14 @@ static void shmem_evict_inode(struct inode *inode)
                        }
                        spin_unlock(&sbinfo->shrinklist_lock);
                }
-               if (!list_empty(&info->swaplist)) {
+               while (!list_empty(&info->swaplist)) {
+                       /* Wait while shmem_unuse() is scanning this inode... */
+                       wait_var_event(&info->stop_eviction,
+                                      !atomic_read(&info->stop_eviction));
                        mutex_lock(&shmem_swaplist_mutex);
-                       list_del_init(&info->swaplist);
+                       /* ...but beware of the race if we peeked too early */
+                       if (!atomic_read(&info->stop_eviction))
+                               list_del_init(&info->swaplist);
                        mutex_unlock(&shmem_swaplist_mutex);
                }
        }
@@ -1099,10 +1104,11 @@ extern struct swap_info_struct *swap_info[];
 static int shmem_find_swap_entries(struct address_space *mapping,
                                   pgoff_t start, unsigned int nr_entries,
                                   struct page **entries, pgoff_t *indices,
-                                  bool frontswap)
+                                  unsigned int type, bool frontswap)
 {
        XA_STATE(xas, &mapping->i_pages, start);
        struct page *page;
+       swp_entry_t entry;
        unsigned int ret = 0;
 
        if (!nr_entries)
@@ -1116,13 +1122,12 @@ static int shmem_find_swap_entries(struct address_space *mapping,
                if (!xa_is_value(page))
                        continue;
 
-               if (frontswap) {
-                       swp_entry_t entry = radix_to_swp_entry(page);
-
-                       if (!frontswap_test(swap_info[swp_type(entry)],
-                                           swp_offset(entry)))
-                               continue;
-               }
+               entry = radix_to_swp_entry(page);
+               if (swp_type(entry) != type)
+                       continue;
+               if (frontswap &&
+                   !frontswap_test(swap_info[type], swp_offset(entry)))
+                       continue;
 
                indices[ret] = xas.xa_index;
                entries[ret] = page;
@@ -1194,7 +1199,7 @@ static int shmem_unuse_inode(struct inode *inode, unsigned int type,
 
                pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
                                                  pvec.pages, indices,
-                                                 frontswap);
+                                                 type, frontswap);
                if (pvec.nr == 0) {
                        ret = 0;
                        break;
@@ -1227,36 +1232,27 @@ int shmem_unuse(unsigned int type, bool frontswap,
                unsigned long *fs_pages_to_unuse)
 {
        struct shmem_inode_info *info, *next;
-       struct inode *inode;
-       struct inode *prev_inode = NULL;
        int error = 0;
 
        if (list_empty(&shmem_swaplist))
                return 0;
 
        mutex_lock(&shmem_swaplist_mutex);
-
-       /*
-        * The extra refcount on the inode is necessary to safely dereference
-        * p->next after re-acquiring the lock. New shmem inodes with swap
-        * get added to the end of the list and we will scan them all.
-        */
        list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
                if (!info->swapped) {
                        list_del_init(&info->swaplist);
                        continue;
                }
-
-               inode = igrab(&info->vfs_inode);
-               if (!inode)
-                       continue;
-
+               /*
+                * Drop the swaplist mutex while searching the inode for swap;
+                * but before doing so, make sure shmem_evict_inode() will not
+                * remove placeholder inode from swaplist, nor let it be freed
+                * (igrab() would protect from unlink, but not from unmount).
+                */
+               atomic_inc(&info->stop_eviction);
                mutex_unlock(&shmem_swaplist_mutex);
-               if (prev_inode)
-                       iput(prev_inode);
-               prev_inode = inode;
 
-               error = shmem_unuse_inode(inode, type, frontswap,
+               error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
                                          fs_pages_to_unuse);
                cond_resched();
 
@@ -1264,14 +1260,13 @@ int shmem_unuse(unsigned int type, bool frontswap,
                next = list_next_entry(info, swaplist);
                if (!info->swapped)
                        list_del_init(&info->swaplist);
+               if (atomic_dec_and_test(&info->stop_eviction))
+                       wake_up_var(&info->stop_eviction);
                if (error)
                        break;
        }
        mutex_unlock(&shmem_swaplist_mutex);
 
-       if (prev_inode)
-               iput(prev_inode);
-
        return error;
 }
 
@@ -2238,6 +2233,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
                info = SHMEM_I(inode);
                memset(info, 0, (char *)inode - (char *)info);
                spin_lock_init(&info->lock);
+               atomic_set(&info->stop_eviction, 0);
                info->seals = F_SEAL_SEAL;
                info->flags = flags & VM_NORESERVE;
                INIT_LIST_HEAD(&info->shrinklist);
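
The shmem hunks are an instance of the generic pin-count plus wait_var_event()/wake_up_var() handshake: scanners bump an atomic before dropping the list lock, and eviction waits for it to drain. A minimal sketch of just that pattern (struct example_info is made up):

#include <linux/atomic.h>
#include <linux/wait_bit.h>

struct example_info {
	atomic_t stop_eviction;		/* holds off eviction while > 0 */
};

/* Scanner: pin the object before dropping the list lock. */
static void example_pin(struct example_info *info)
{
	atomic_inc(&info->stop_eviction);
}

/* Scanner: unpin and wake a possibly waiting evictor. */
static void example_unpin(struct example_info *info)
{
	if (atomic_dec_and_test(&info->stop_eviction))
		wake_up_var(&info->stop_eviction);
}

/* Evictor: wait until no scanner holds the object. */
static void example_wait(struct example_info *info)
{
	wait_var_event(&info->stop_eviction,
		       !atomic_read(&info->stop_eviction));
}
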
index 28652e4218e0c1e5da82e2094d5ec43046e9b472..9142ee99249327f22224b32c5805002ec4615dd3 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2115,6 +2115,8 @@ done:
        cachep->allocflags = __GFP_COMP;
        if (flags & SLAB_CACHE_DMA)
                cachep->allocflags |= GFP_DMA;
+       if (flags & SLAB_CACHE_DMA32)
+               cachep->allocflags |= GFP_DMA32;
        if (flags & SLAB_RECLAIM_ACCOUNT)
                cachep->allocflags |= __GFP_RECLAIMABLE;
        cachep->size = size;
@@ -2372,7 +2374,6 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
                /* Slab management obj is off-slab. */
                freelist = kmem_cache_alloc_node(cachep->freelist_cache,
                                              local_flags, nodeid);
-               freelist = kasan_reset_tag(freelist);
                if (!freelist)
                        return NULL;
        } else {
@@ -4306,7 +4307,8 @@ static void show_symbol(struct seq_file *m, unsigned long address)
 
 static int leaks_show(struct seq_file *m, void *p)
 {
-       struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
+       struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
+                                              root_caches_node);
        struct page *page;
        struct kmem_cache_node *n;
        const char *name;
index e5e6658eeacca81c694ccef400d19bbcd138d6ab..43ac818b8592bc472b4b67e19831b404cc798aca 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -127,7 +127,8 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
 
 
 /* Legal flag mask for kmem_cache_create(), for various configurations */
-#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
+#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
+                        SLAB_CACHE_DMA32 | SLAB_PANIC | \
                         SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
 
 #if defined(CONFIG_DEBUG_SLAB)
index 03eeb8b7b4b1d5d9fc0a395459478c79ad8a2656..58251ba63e4a19fb9262c6adb59831075a858dd5 100644 (file)
@@ -53,7 +53,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
                SLAB_FAILSLAB | SLAB_KASAN)
 
 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
-                        SLAB_ACCOUNT)
+                        SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
 
 /*
  * Merge control. If this is set then no merging of slab caches will occur.
index 1b08fbcb7e61fbcc5fa84738dc09e88050d2bd2b..6b28cd2b5a58c9f09f81beae1972d9a229ec65e2 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -552,31 +552,22 @@ static void set_track(struct kmem_cache *s, void *object,
 
        if (addr) {
 #ifdef CONFIG_STACKTRACE
-               struct stack_trace trace;
-               int i;
+               unsigned int nr_entries;
 
-               trace.nr_entries = 0;
-               trace.max_entries = TRACK_ADDRS_COUNT;
-               trace.entries = p->addrs;
-               trace.skip = 3;
                metadata_access_enable();
-               save_stack_trace(&trace);
+               nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3);
                metadata_access_disable();
 
-               /* See rant in lockdep.c */
-               if (trace.nr_entries != 0 &&
-                   trace.entries[trace.nr_entries - 1] == ULONG_MAX)
-                       trace.nr_entries--;
-
-               for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
-                       p->addrs[i] = 0;
+               if (nr_entries < TRACK_ADDRS_COUNT)
+                       p->addrs[nr_entries] = 0;
 #endif
                p->addr = addr;
                p->cpu = smp_processor_id();
                p->pid = current->pid;
                p->when = jiffies;
-       } else
+       } else {
                memset(p, 0, sizeof(struct track));
+       }
 }
 
 static void init_tracking(struct kmem_cache *s, void *object)
@@ -3589,6 +3580,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
        if (s->flags & SLAB_CACHE_DMA)
                s->allocflags |= GFP_DMA;
 
+       if (s->flags & SLAB_CACHE_DMA32)
+               s->allocflags |= GFP_DMA32;
+
        if (s->flags & SLAB_RECLAIM_ACCOUNT)
                s->allocflags |= __GFP_RECLAIMABLE;
 
@@ -5679,6 +5673,8 @@ static char *create_unique_id(struct kmem_cache *s)
         */
        if (s->flags & SLAB_CACHE_DMA)
                *p++ = 'd';
+       if (s->flags & SLAB_CACHE_DMA32)
+               *p++ = 'D';
        if (s->flags & SLAB_RECLAIM_ACCOUNT)
                *p++ = 'a';
        if (s->flags & SLAB_CONSISTENCY_CHECKS)
index 69904aa6165bf13b89a44d6abf84609fea2076ba..56e057c432f9663439c4cfd38f8cad8f2e6e2c3d 100644 (file)
@@ -567,7 +567,7 @@ void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-/* Mark all memory sections within the pfn range as online */
+/* Mark all memory sections within the pfn range as offline */
 void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 {
        unsigned long pfn;
index 2b8d9c3fbb47fd7a5c2a711dad73c5889dfe0bb2..cf63b5f01adf7da9d1def93f8763b50243adc698 100644 (file)
@@ -2023,7 +2023,6 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
  * If the boolean frontswap is true, only unuse pages_to_unuse pages;
  * pages_to_unuse==0 means all pages; ignored if frontswap is false
  */
-#define SWAP_UNUSE_MAX_TRIES 3
 int try_to_unuse(unsigned int type, bool frontswap,
                 unsigned long pages_to_unuse)
 {
@@ -2035,7 +2034,6 @@ int try_to_unuse(unsigned int type, bool frontswap,
        struct page *page;
        swp_entry_t entry;
        unsigned int i;
-       int retries = 0;
 
        if (!si->inuse_pages)
                return 0;
@@ -2053,11 +2051,9 @@ retry:
 
        spin_lock(&mmlist_lock);
        p = &init_mm.mmlist;
-       while ((p = p->next) != &init_mm.mmlist) {
-               if (signal_pending(current)) {
-                       retval = -EINTR;
-                       break;
-               }
+       while (si->inuse_pages &&
+              !signal_pending(current) &&
+              (p = p->next) != &init_mm.mmlist) {
 
                mm = list_entry(p, struct mm_struct, mmlist);
                if (!mmget_not_zero(mm))
@@ -2084,7 +2080,9 @@ retry:
        mmput(prev_mm);
 
        i = 0;
-       while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
+       while (si->inuse_pages &&
+              !signal_pending(current) &&
+              (i = find_next_to_unuse(si, i, frontswap)) != 0) {
 
                entry = swp_entry(type, i);
                page = find_get_page(swap_address_space(entry), i);
@@ -2117,14 +2115,18 @@ retry:
         * If yes, we would need to do retry the unuse logic again.
         * Under global memory pressure, swap entries can be reinserted back
         * into process space after the mmlist loop above passes over them.
-        * Its not worth continuosuly retrying to unuse the swap in this case.
-        * So we try SWAP_UNUSE_MAX_TRIES times.
+        *
+        * Limit the number of retries? No: when mmget_not_zero() above fails,
+        * that mm is likely to be freeing swap from exit_mmap(), which proceeds
+        * at its own independent pace; and even shmem_writepage() could have
+        * been preempted after get_swap_page(), temporarily hiding that swap.
+        * It's easy and robust (though cpu-intensive) just to keep retrying.
         */
-       if (++retries >= SWAP_UNUSE_MAX_TRIES)
-               retval = -EBUSY;
-       else if (si->inuse_pages)
-               goto retry;
-
+       if (si->inuse_pages) {
+               if (!signal_pending(current))
+                       goto retry;
+               retval = -EINTR;
+       }
 out:
        return (retval == FRONTSWAP_PAGES_UNUSED) ? 0 : retval;
 }
index d559bde497a9b9690f328f6f5deea3eb17de474b..43a2984bccaab6525afb6e7c61ce8d903f45f24e 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -204,7 +204,7 @@ EXPORT_SYMBOL(vmemdup_user);
  * @s: The string to duplicate
  * @n: Maximum number of bytes to copy, including the trailing NUL.
  *
- * Return: newly allocated copy of @s or %NULL in case of error
+ * Return: newly allocated copy of @s or an ERR_PTR() in case of error
  */
 char *strndup_user(const char __user *s, long n)
 {
index a5ad0b35ab8e3e6bea056baf2e5d8729f1003326..a815f73ee4d5b2d1a9872cca19db055845499aa1 100644 (file)
@@ -2176,7 +2176,6 @@ static void shrink_active_list(unsigned long nr_to_scan,
  *   10TB     320        32GB
  */
 static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
-                                struct mem_cgroup *memcg,
                                 struct scan_control *sc, bool actual_reclaim)
 {
        enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
@@ -2197,16 +2196,12 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
        inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx);
        active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
 
-       if (memcg)
-               refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE);
-       else
-               refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
-
        /*
         * When refaults are being observed, it means a new workingset
         * is being established. Disable active list protection to get
         * rid of the stale workingset quickly.
         */
+       refaults = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE);
        if (file && actual_reclaim && lruvec->refaults != refaults) {
                inactive_ratio = 0;
        } else {
@@ -2227,12 +2222,10 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
 }
 
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
-                                struct lruvec *lruvec, struct mem_cgroup *memcg,
-                                struct scan_control *sc)
+                                struct lruvec *lruvec, struct scan_control *sc)
 {
        if (is_active_lru(lru)) {
-               if (inactive_list_is_low(lruvec, is_file_lru(lru),
-                                        memcg, sc, true))
+               if (inactive_list_is_low(lruvec, is_file_lru(lru), sc, true))
                        shrink_active_list(nr_to_scan, lruvec, sc, lru);
                return 0;
        }
@@ -2332,7 +2325,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
                         * anonymous pages on the LRU in eligible zones.
                         * Otherwise, the small LRU gets thrashed.
                         */
-                       if (!inactive_list_is_low(lruvec, false, memcg, sc, false) &&
+                       if (!inactive_list_is_low(lruvec, false, sc, false) &&
                            lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, sc->reclaim_idx)
                                        >> sc->priority) {
                                scan_balance = SCAN_ANON;
@@ -2350,7 +2343,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
         * lruvec even if it has plenty of old anonymous pages unless the
         * system is under heavy pressure.
         */
-       if (!inactive_list_is_low(lruvec, true, memcg, sc, false) &&
+       if (!inactive_list_is_low(lruvec, true, sc, false) &&
            lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) {
                scan_balance = SCAN_FILE;
                goto out;
@@ -2503,7 +2496,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
                                nr[lru] -= nr_to_scan;
 
                                nr_reclaimed += shrink_list(lru, nr_to_scan,
-                                                           lruvec, memcg, sc);
+                                                           lruvec, sc);
                        }
                }
 
@@ -2570,7 +2563,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
         * Even if we did not try to evict anon pages at all, we want to
         * rebalance the anon lru active/inactive ratio.
         */
-       if (inactive_list_is_low(lruvec, false, memcg, sc, true))
+       if (inactive_list_is_low(lruvec, false, sc, true))
                shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
                                   sc, LRU_ACTIVE_ANON);
 }
@@ -2969,12 +2962,8 @@ static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat)
                unsigned long refaults;
                struct lruvec *lruvec;
 
-               if (memcg)
-                       refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE);
-               else
-                       refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
-
                lruvec = mem_cgroup_lruvec(pgdat, memcg);
+               refaults = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE);
                lruvec->refaults = refaults;
        } while ((memcg = mem_cgroup_iter(root_memcg, memcg, NULL)));
 }
@@ -3339,7 +3328,7 @@ static void age_active_anon(struct pglist_data *pgdat,
        do {
                struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
 
-               if (inactive_list_is_low(lruvec, false, memcg, sc, true))
+               if (inactive_list_is_low(lruvec, false, sc, true))
                        shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
                                           sc, LRU_ACTIVE_ANON);
 
index 36b56f858f0f0a8eb8db003d2b2a74a21a4c37f1..a7d493366a65b31b547ef25d34b94f2417895372 100644 (file)
@@ -1274,13 +1274,8 @@ const char * const vmstat_text[] = {
 #endif
 #endif /* CONFIG_MEMORY_BALLOON */
 #ifdef CONFIG_DEBUG_TLBFLUSH
-#ifdef CONFIG_SMP
        "nr_tlb_remote_flush",
        "nr_tlb_remote_flush_received",
-#else
-       "", /* nr_tlb_remote_flush */
-       "", /* nr_tlb_remote_flush_received */
-#endif /* CONFIG_SMP */
        "nr_tlb_local_flush_all",
        "nr_tlb_local_flush_one",
 #endif /* CONFIG_DEBUG_TLBFLUSH */
index 15293c2a5dd821d39233c177d540e4db7407b110..8d77b6ee4477df71bc466c057338ea4ae62dcd88 100644 (file)
@@ -443,27 +443,29 @@ static int vlan_dev_fcoe_disable(struct net_device *dev)
        return rc;
 }
 
-static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
+                                   struct scatterlist *sgl, unsigned int sgc)
 {
        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
-       int rc = -EINVAL;
+       int rc = 0;
+
+       if (ops->ndo_fcoe_ddp_target)
+               rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
 
-       if (ops->ndo_fcoe_get_wwn)
-               rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
        return rc;
 }
+#endif
 
-static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
-                                   struct scatterlist *sgl, unsigned int sgc)
+#ifdef NETDEV_FCOE_WWNN
+static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
 {
        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
-       int rc = 0;
-
-       if (ops->ndo_fcoe_ddp_target)
-               rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
+       int rc = -EINVAL;
 
+       if (ops->ndo_fcoe_get_wwn)
+               rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
        return rc;
 }
 #endif
@@ -794,9 +796,11 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_fcoe_ddp_done      = vlan_dev_fcoe_ddp_done,
        .ndo_fcoe_enable        = vlan_dev_fcoe_enable,
        .ndo_fcoe_disable       = vlan_dev_fcoe_disable,
-       .ndo_fcoe_get_wwn       = vlan_dev_fcoe_get_wwn,
        .ndo_fcoe_ddp_target    = vlan_dev_fcoe_ddp_target,
 #endif
+#ifdef NETDEV_FCOE_WWNN
+       .ndo_fcoe_get_wwn       = vlan_dev_fcoe_get_wwn,
+#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = vlan_dev_poll_controller,
        .ndo_netpoll_setup      = vlan_dev_netpoll_setup,
index 49a16cee2aae97f013ef79401feb0b4de3c4a44e..420a98bf79b536d11f862026ff71747a67301cf4 100644 (file)
@@ -879,15 +879,24 @@ static struct notifier_block aarp_notifier = {
 
 static unsigned char aarp_snap_id[] = { 0x00, 0x00, 0x00, 0x80, 0xF3 };
 
-void __init aarp_proto_init(void)
+int __init aarp_proto_init(void)
 {
+       int rc;
+
        aarp_dl = register_snap_client(aarp_snap_id, aarp_rcv);
-       if (!aarp_dl)
+       if (!aarp_dl) {
                printk(KERN_CRIT "Unable to register AARP with SNAP.\n");
+               return -ENOMEM;
+       }
        timer_setup(&aarp_timer, aarp_expire_timeout, 0);
        aarp_timer.expires  = jiffies + sysctl_aarp_expiry_time;
        add_timer(&aarp_timer);
-       register_netdevice_notifier(&aarp_notifier);
+       rc = register_netdevice_notifier(&aarp_notifier);
+       if (rc) {
+               del_timer_sync(&aarp_timer);
+               unregister_snap_client(aarp_dl);
+       }
+       return rc;
 }
 
 /* Remove the AARP entries associated with a device. */
index 795fbc6c06aa7a9e7078aafafad97024373afeb3..dbe8b1993be9eec3c84e2be09602a5f471ed3f34 100644 (file)
@@ -1904,9 +1904,6 @@ static unsigned char ddp_snap_id[] = { 0x08, 0x00, 0x07, 0x80, 0x9B };
 EXPORT_SYMBOL(atrtr_get_dev);
 EXPORT_SYMBOL(atalk_find_dev_addr);
 
-static const char atalk_err_snap[] __initconst =
-       KERN_CRIT "Unable to register DDP with SNAP.\n";
-
 /* Called by proto.c on kernel start up */
 static int __init atalk_init(void)
 {
@@ -1921,17 +1918,23 @@ static int __init atalk_init(void)
                goto out_proto;
 
        ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
-       if (!ddp_dl)
-               printk(atalk_err_snap);
+       if (!ddp_dl) {
+               pr_crit("Unable to register DDP with SNAP.\n");
+               rc = -ENOMEM;
+               goto out_sock;
+       }
 
        dev_add_pack(&ltalk_packet_type);
        dev_add_pack(&ppptalk_packet_type);
 
        rc = register_netdevice_notifier(&ddp_notifier);
        if (rc)
-               goto out_sock;
+               goto out_snap;
+
+       rc = aarp_proto_init();
+       if (rc)
+               goto out_dev;
 
-       aarp_proto_init();
        rc = atalk_proc_init();
        if (rc)
                goto out_aarp;
@@ -1945,11 +1948,13 @@ out_proc:
        atalk_proc_exit();
 out_aarp:
        aarp_cleanup_module();
+out_dev:
        unregister_netdevice_notifier(&ddp_notifier);
-out_sock:
+out_snap:
        dev_remove_pack(&ppptalk_packet_type);
        dev_remove_pack(&ltalk_packet_type);
        unregister_snap_client(ddp_dl);
+out_sock:
        sock_unregister(PF_APPLETALK);
 out_proto:
        proto_unregister(&ddp_proto);
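
Both appletalk hunks converge on the usual init-time pattern: register resources in order, and on any failure unwind exactly the ones already registered, in reverse. A generic hedged sketch (every step_* helper is hypothetical):

/* Sketch: reverse-order unwinding with goto labels. */
static int __init example_init(void)
{
	int rc;

	rc = step_a_register();
	if (rc)
		return rc;
	rc = step_b_register();
	if (rc)
		goto out_a;
	rc = step_c_register();
	if (rc)
		goto out_b;
	return 0;

out_b:
	step_b_unregister();
out_a:
	step_a_unregister();
	return rc;
}
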
index d7f5cf5b7594d0ea4e766e06fbc07e6fce590e3b..ad4f829193f053c8a0c0846f1e9f619617dcd18e 100644 (file)
@@ -710,7 +710,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
 
 static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
 {
-       if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
+       if (arg < 0 || arg >= MAX_LEC_ITF)
+               return -EINVAL;
+       arg = array_index_nospec(arg, MAX_LEC_ITF);
+       if (!dev_lec[arg])
                return -EINVAL;
        vcc->proto_data = dev_lec[arg];
        return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
@@ -728,6 +731,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
                i = arg;
        if (arg >= MAX_LEC_ITF)
                return -EINVAL;
+       i = array_index_nospec(arg, MAX_LEC_ITF);
        if (!dev_lec[i]) {
                int size;
 
index a9b7919c9de55396d35b152a86edbf814287d8dd..d5df0114f08ac5331dcd46699551556ba60a4b80 100644 (file)
@@ -104,8 +104,10 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
 
                ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
 
-               /* free the TID stats immediately */
-               cfg80211_sinfo_release_content(&sinfo);
+               if (!ret) {
+                       /* free the TID stats immediately */
+                       cfg80211_sinfo_release_content(&sinfo);
+               }
 
                dev_put(real_netdev);
                if (ret == -ENOENT) {
index ef39aabdb69435f384ae2ebef26d4d31367f427c..4fb01108e5f534b8a055edface0717f950ccdacc 100644 (file)
@@ -803,6 +803,8 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
                                 const u8 *mac, const unsigned short vid)
 {
        struct batadv_bla_claim search_claim, *claim;
+       struct batadv_bla_claim *claim_removed_entry;
+       struct hlist_node *claim_removed_node;
 
        ether_addr_copy(search_claim.addr, mac);
        search_claim.vid = vid;
@@ -813,10 +815,18 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
        batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
                   mac, batadv_print_vid(vid));
 
-       batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
-                          batadv_choose_claim, claim);
-       batadv_claim_put(claim); /* reference from the hash is gone */
+       claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
+                                               batadv_compare_claim,
+                                               batadv_choose_claim, claim);
+       if (!claim_removed_node)
+               goto free_claim;
 
+       /* reference from the hash is gone */
+       claim_removed_entry = hlist_entry(claim_removed_node,
+                                         struct batadv_bla_claim, hash_entry);
+       batadv_claim_put(claim_removed_entry);
+
+free_claim:
        /* don't need the reference from hash_find() anymore */
        batadv_claim_put(claim);
 }
index 0b4b3fb778a61708978438bcaf7adfd05b997668..208655cf67179b5c2bbcfb8330993b42ca70109f 100644 (file)
@@ -1116,9 +1116,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
                                                struct attribute *attr,
                                                char *buff, size_t count)
 {
-       struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
        struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
        struct batadv_hard_iface *hard_iface;
+       struct batadv_priv *bat_priv;
        u32 tp_override;
        u32 old_tp_override;
        bool ret;
@@ -1147,7 +1147,10 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
 
        atomic_set(&hard_iface->bat_v.throughput_override, tp_override);
 
-       batadv_netlink_notify_hardif(bat_priv, hard_iface);
+       if (hard_iface->soft_iface) {
+               bat_priv = netdev_priv(hard_iface->soft_iface);
+               batadv_netlink_notify_hardif(bat_priv, hard_iface);
+       }
 
 out:
        batadv_hardif_put(hard_iface);
index f73d79139ae79834a3e429fab82b7d55e8a373d5..26c4e2493ddfbfdea26a9b45409821834010a913 100644 (file)
@@ -616,14 +616,26 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
                                  struct batadv_tt_global_entry *tt_global,
                                  const char *message)
 {
+       struct batadv_tt_global_entry *tt_removed_entry;
+       struct hlist_node *tt_removed_node;
+
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Deleting global tt entry %pM (vid: %d): %s\n",
                   tt_global->common.addr,
                   batadv_print_vid(tt_global->common.vid), message);
 
-       batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
-                          batadv_choose_tt, &tt_global->common);
-       batadv_tt_global_entry_put(tt_global);
+       tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash,
+                                            batadv_compare_tt,
+                                            batadv_choose_tt,
+                                            &tt_global->common);
+       if (!tt_removed_node)
+               return;
+
+       /* drop the reference of the removed hash entry */
+       tt_removed_entry = hlist_entry(tt_removed_node,
+                                      struct batadv_tt_global_entry,
+                                      common.hash_entry);
+       batadv_tt_global_entry_put(tt_removed_entry);
 }
 
 /**
@@ -1337,9 +1349,10 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
                           unsigned short vid, const char *message,
                           bool roaming)
 {
+       struct batadv_tt_local_entry *tt_removed_entry;
        struct batadv_tt_local_entry *tt_local_entry;
        u16 flags, curr_flags = BATADV_NO_FLAGS;
-       void *tt_entry_exists;
+       struct hlist_node *tt_removed_node;
 
        tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
        if (!tt_local_entry)
@@ -1368,15 +1381,18 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
         */
        batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
 
-       tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
+       tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash,
                                             batadv_compare_tt,
                                             batadv_choose_tt,
                                             &tt_local_entry->common);
-       if (!tt_entry_exists)
+       if (!tt_removed_node)
                goto out;
 
-       /* extra call to free the local tt entry */
-       batadv_tt_local_entry_put(tt_local_entry);
+       /* drop the reference of the removed hash entry */
+       tt_removed_entry = hlist_entry(tt_removed_node,
+                                      struct batadv_tt_local_entry,
+                                      common.hash_entry);
+       batadv_tt_local_entry_put(tt_removed_entry);
 
 out:
        if (tt_local_entry)
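
All three batman-adv hunks close the same window: two racing removers could each put the reference of the entry they looked up, although only one of them actually unlinked it. The fix is to put only the entry derived from the node batadv_hash_remove() returns. The pattern, condensed into a sketch (not the full function):

/* Sketch: only drop the hash's reference if we won the removal race. */
static void example_remove(struct batadv_priv *bat_priv,
			   struct batadv_tt_local_entry *entry)
{
	struct hlist_node *node;

	node = batadv_hash_remove(bat_priv->tt.local_hash,
				  batadv_compare_tt, batadv_choose_tt,
				  &entry->common);
	if (node)	/* we unlinked it: drop the reference the hash held */
		batadv_tt_local_entry_put(hlist_entry(node,
					struct batadv_tt_local_entry,
					common.hash_entry));

	/* drop the reference taken by the earlier hash_find() lookup */
	batadv_tt_local_entry_put(entry);
}
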
index 9a580999ca57e3037336bbcdb321dbb4ef0cb196..d892b7c3cc42a05e10053832d7bd4d969f019e46 100644 (file)
@@ -523,12 +523,12 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr,
        struct sock *sk = sock->sk;
        int err = 0;
 
-       BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
-
        if (!addr || addr_len < sizeof(struct sockaddr_sco) ||
            addr->sa_family != AF_BLUETOOTH)
                return -EINVAL;
 
+       BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
+
        lock_sock(sk);
 
        if (sk->sk_state != BT_OPEN) {
index 5ea7e56119c13876a8726ffee2e9dc43ce73406f..ba303ee99b9b59762e724072d0f66907f46235b2 100644 (file)
@@ -197,13 +197,10 @@ static void __br_handle_local_finish(struct sk_buff *skb)
 /* note: already called with rcu_read_lock */
 static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-       struct net_bridge_port *p = br_port_get_rcu(skb->dev);
-
        __br_handle_local_finish(skb);
 
-       BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
-       br_pass_frame_up(skb);
-       return 0;
+       /* return 1 to signal the okfn() was called so it's ok to use the skb */
+       return 1;
 }
 
 /*
@@ -280,10 +277,18 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
                                goto forward;
                }
 
-               /* Deliver packet to local host only */
-               NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev),
-                       NULL, skb, skb->dev, NULL, br_handle_local_finish);
-               return RX_HANDLER_CONSUMED;
+               /* The else clause should be hit when nf_hook():
+                *   - returns < 0 (drop/error)
+                *   - returns = 0 (stolen/nf_queue)
+                * Thus return 1 from the okfn() to signal the skb is ok to pass
+                */
+               if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
+                           dev_net(skb->dev), NULL, skb, skb->dev, NULL,
+                           br_handle_local_finish) == 1) {
+                       return RX_HANDLER_PASS;
+               } else {
+                       return RX_HANDLER_CONSUMED;
+               }
        }
 
 forward:
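
The rewritten hunk leans on the okfn-return convention: NF_HOOK() propagates the okfn's return value when every hook accepts the packet, so an okfn that returns 1 tells br_handle_frame() the skb is still usable and may be passed up, while anything else means it was dropped, stolen, or queued. A hedged userspace sketch of that convention (all names illustrative, not the netfilter API):

    #include <stdio.h>

    enum verdict { DROP = -1, STOLEN = 0, ACCEPT = 1 };

    static int okfn(void *pkt) { (void)pkt; return 1; /* "skb still ours" */ }

    static int run_hook(enum verdict v, void *pkt, int (*ok)(void *))
    {
        if (v == DROP)   return -1;   /* error/drop: packet freed */
        if (v == STOLEN) return 0;    /* queued elsewhere */
        return ok(pkt);               /* accepted: okfn's value propagates */
    }

    int main(void)
    {
        int pkt = 42;

        if (run_hook(ACCEPT, &pkt, okfn) == 1)
            printf("pass skb up the stack\n");   /* ~ RX_HANDLER_PASS */
        else
            printf("consumed\n");                /* ~ RX_HANDLER_CONSUMED */
        return 0;
    }
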
index a0e369179f6d1316ec261521b857a7687272d3b9..45e7f4173bbafe7e59e2ea514a7bccbee8456c79 100644 (file)
@@ -601,6 +601,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
        if (ipv4_is_local_multicast(group))
                return 0;
 
+       memset(&br_group, 0, sizeof(br_group));
        br_group.u.ip4 = group;
        br_group.proto = htons(ETH_P_IP);
        br_group.vid = vid;
@@ -1497,6 +1498,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
 
        own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
 
+       memset(&br_group, 0, sizeof(br_group));
        br_group.u.ip4 = group;
        br_group.proto = htons(ETH_P_IP);
        br_group.vid = vid;
@@ -1520,6 +1522,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
 
        own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
 
+       memset(&br_group, 0, sizeof(br_group));
        br_group.u.ip6 = *group;
        br_group.proto = htons(ETH_P_IPV6);
        br_group.vid = vid;
@@ -2028,7 +2031,8 @@ static void br_multicast_start_querier(struct net_bridge *br,
 
        __br_multicast_open(br, query);
 
-       list_for_each_entry(port, &br->port_list, list) {
+       rcu_read_lock();
+       list_for_each_entry_rcu(port, &br->port_list, list) {
                if (port->state == BR_STATE_DISABLED ||
                    port->state == BR_STATE_BLOCKING)
                        continue;
@@ -2040,6 +2044,7 @@ static void br_multicast_start_querier(struct net_bridge *br,
                        br_multicast_enable(&port->ip6_own_query);
 #endif
        }
+       rcu_read_unlock();
 }
 
 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
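
The hunk above wraps the walk in rcu_read_lock()/rcu_read_unlock() and switches to list_for_each_entry_rcu(), since br->port_list can be modified concurrently while the querier is started. Real RCU is kernel machinery; the sketch below only models the discipline of iterating inside a read-side critical section, using a userspace rwlock as a stand-in:

    #include <pthread.h>
    #include <stdio.h>

    struct port { int state; struct port *next; };

    static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;
    static struct port *port_list;

    static void enable_queriers(void)
    {
        pthread_rwlock_rdlock(&list_lock);      /* ~ rcu_read_lock() */
        for (struct port *p = port_list; p; p = p->next) {
            if (p->state == 0)                  /* ~ DISABLED/BLOCKING */
                continue;
            printf("enable querier on port %p\n", (void *)p);
        }
        pthread_rwlock_unlock(&list_lock);      /* ~ rcu_read_unlock() */
    }

    int main(void)
    {
        struct port a = { 1, NULL }, b = { 0, &a };

        port_list = &b;
        enable_queriers();
        return 0;
    }
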
index 9d34de68571be8969ee7a57d9f2eb680777b1cea..22afa566cbce9cd6d58abe3ead14ffd7199fd18d 100644 (file)
@@ -502,6 +502,7 @@ static unsigned int br_nf_pre_routing(void *priv,
        nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;
 
        skb->protocol = htons(ETH_P_IP);
+       skb->transport_header = skb->network_header + ip_hdr(skb)->ihl * 4;
 
        NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
                skb->dev, NULL,
index 564710f88f938cb314f2f59177289ecb127454f4..e88d6641647bab45397f5206737b367ea60cb9b0 100644 (file)
@@ -235,6 +235,8 @@ unsigned int br_nf_pre_routing_ipv6(void *priv,
        nf_bridge->ipv6_daddr = ipv6_hdr(skb)->daddr;
 
        skb->protocol = htons(ETH_P_IPV6);
+       skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
+
        NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
                skb->dev, NULL,
                br_nf_pre_routing_finish_ipv6);
index 9c07591b0232e6bbdecf7efece7c9143842671b1..7104cf13da840d21cca1a63d3c1551ff9bbd9076 100644 (file)
@@ -1441,7 +1441,7 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
            nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
                       br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
            nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
-                      br_opt_get(br, IFLA_BR_VLAN_STATS_PER_PORT)))
+                      br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
                return -EMSGSIZE;
 #endif
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
index eb15891f8b9ff18842b7d43e96c75733ef7aaa99..3cad01ac64e4a2a5ebcafa394d04cc4415c7ddab 100644 (file)
@@ -2032,7 +2032,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
                if (match_kern)
                        match_kern->match_size = ret;
 
-               if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
+               /* rule should have no remaining data after target */
+               if (type == EBT_COMPAT_TARGET && size_left)
                        return -EINVAL;
 
                match32 = (struct compat_ebt_entry_mwt *) buf;
index 9cab80207ced6346b5b5b97b56e98674f28def73..79eac465ec65e99cf22e475f3edb0609ea4787e5 100644 (file)
@@ -738,7 +738,6 @@ int __ceph_open_session(struct ceph_client *client, unsigned long started)
 }
 EXPORT_SYMBOL(__ceph_open_session);
 
-
 int ceph_open_session(struct ceph_client *client)
 {
        int ret;
@@ -754,6 +753,23 @@ int ceph_open_session(struct ceph_client *client)
 }
 EXPORT_SYMBOL(ceph_open_session);
 
+int ceph_wait_for_latest_osdmap(struct ceph_client *client,
+                               unsigned long timeout)
+{
+       u64 newest_epoch;
+       int ret;
+
+       ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
+       if (ret)
+               return ret;
+
+       if (client->osdc.osdmap->epoch >= newest_epoch)
+               return 0;
+
+       ceph_osdc_maybe_request_map(&client->osdc);
+       return ceph_monc_wait_osdmap(&client->monc, newest_epoch, timeout);
+}
+EXPORT_SYMBOL(ceph_wait_for_latest_osdmap);
 
 static int __init init_ceph_lib(void)
 {
index 7e71b0df1fbc9185b192a43427c7cb281b778ca1..3083988ce729dbe01771e9433b7de72e484394f9 100644 (file)
@@ -840,6 +840,7 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
                                        size_t bytes)
 {
        struct ceph_bio_iter *it = &cursor->bio_iter;
+       struct page *page = bio_iter_page(it->bio, it->iter);
 
        BUG_ON(bytes > cursor->resid);
        BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
@@ -851,7 +852,8 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
                return false;   /* no more data */
        }
 
-       if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done))
+       if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done &&
+                      page == bio_iter_page(it->bio, it->iter)))
                return false;   /* more bytes to process in this segment */
 
        if (!it->iter.bi_size) {
@@ -899,6 +901,7 @@ static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
                                        size_t bytes)
 {
        struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs;
+       struct page *page = bvec_iter_page(bvecs, cursor->bvec_iter);
 
        BUG_ON(bytes > cursor->resid);
        BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter));
@@ -910,7 +913,8 @@ static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
                return false;   /* no more data */
        }
 
-       if (!bytes || cursor->bvec_iter.bi_bvec_done)
+       if (!bytes || (cursor->bvec_iter.bi_bvec_done &&
+                      page == bvec_iter_page(bvecs, cursor->bvec_iter)))
                return false;   /* more bytes to process in this segment */
 
        BUG_ON(cursor->last_piece);
index 18deb3d889c4ae94a10417457fa0275a77568b48..a53e4fbb631918ccf94536849e5e25acad2dfdc6 100644 (file)
@@ -922,6 +922,15 @@ int ceph_monc_blacklist_add(struct ceph_mon_client *monc,
        mutex_unlock(&monc->mutex);
 
        ret = wait_generic_request(req);
+       if (!ret)
+               /*
+                * Make sure we have the osdmap that includes the blacklist
+                * entry.  This is needed to ensure that the OSDs pick up the
+                * new blacklist before processing any future requests from
+                * this client.
+                */
+               ret = ceph_wait_for_latest_osdmap(monc->client, 0);
+
 out:
        put_generic_request(req);
        return ret;
index b2651bb6d2a31dde065000c59bbbf3dfdadd976e..e657289db4ac44b023ce40ca9959185a6a33cb22 100644 (file)
@@ -279,7 +279,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
                        break;
 
                sk_busy_loop(sk, flags & MSG_DONTWAIT);
-       } while (!skb_queue_empty(&sk->sk_receive_queue));
+       } while (sk->sk_receive_queue.prev != *last);
 
        error = -EAGAIN;
 
index 2b67f2aa59ddb64d27378bed44f9d262093219e0..f409406254ddf2e204676bb8bdfb95d0cb3a0e71 100644 (file)
@@ -1184,7 +1184,21 @@ int dev_change_name(struct net_device *dev, const char *newname)
        BUG_ON(!dev_net(dev));
 
        net = dev_net(dev);
-       if (dev->flags & IFF_UP)
+
+       /* Some auto-enslaved devices, e.g. failover slaves, are
+        * special, as userspace might rename the device after
+        * the interface has been brought up and running since
+        * the point the kernel initiated auto-enslavement. Allow
+        * live name change even when these slave devices are
+        * up and running.
+        *
+        * Typically, users of these auto-enslaving devices
+        * don't actually care about slave name change, as
+        * they are supposed to operate on master interface
+        * directly.
+        */
+       if (dev->flags & IFF_UP &&
+           likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
                return -EBUSY;
 
        write_seqcount_begin(&devnet_rename_seq);
@@ -5014,8 +5028,10 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head,
        if (pt_prev->list_func != NULL)
                pt_prev->list_func(head, pt_prev, orig_dev);
        else
-               list_for_each_entry_safe(skb, next, head, list)
+               list_for_each_entry_safe(skb, next, head, list) {
+                       skb_list_del_init(skb);
                        pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+               }
 }
 
 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
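
The added skb_list_del_init() detaches each skb before pt_prev->func() runs, because the callback may requeue or free the skb while the iteration would otherwise still see it linked into the local list. A small userspace model of unlink-before-callback on a doubly linked list (names are illustrative):

    #include <stdio.h>

    struct item { struct item *prev, *next; int v; };

    static void del_init(struct item *i)        /* ~ skb_list_del_init() */
    {
        i->prev->next = i->next;
        i->next->prev = i->prev;
        i->prev = i->next = i;                  /* self-linked: off-list */
    }

    static void deliver(struct item *i) { printf("deliver %d\n", i->v); }

    int main(void)
    {
        struct item head = { &head, &head, 0 };
        struct item a = { &head, head.next, 1 };

        head.next->prev = &a; head.next = &a;   /* push a */
        struct item b = { &head, head.next, 2 };
        head.next->prev = &b; head.next = &b;   /* push b */

        for (struct item *i = head.next, *n = i->next;
             i != &head; i = n, n = i->next) {  /* ~ list_for_each_entry_safe */
            del_init(i);                        /* unlink first ...  */
            deliver(i);                         /* ... then hand off */
        }
        return 0;
    }
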
index 78e22cea4cc79589e22781849a843397904e220c..da0a29f30885d1f54b975ecfa4f83b47c68be543 100644 (file)
@@ -3897,6 +3897,11 @@ static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg,
                        continue;
                }
 
+               if (!devlink->ops->info_get) {
+                       idx++;
+                       continue;
+               }
+
                mutex_lock(&devlink->lock);
                err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
                                           NETLINK_CB(cb->skb).portid,
index b1eb324197321dd390596edd2aec6c343ee14654..36ed619faf3641ae4d40edc7385b003de7500764 100644 (file)
@@ -1797,11 +1797,16 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
        WARN_ON_ONCE(!ret);
 
        gstrings.len = ret;
-       data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
-       if (gstrings.len && !data)
-               return -ENOMEM;
 
-       __ethtool_get_strings(dev, gstrings.string_set, data);
+       if (gstrings.len) {
+               data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
+               if (!data)
+                       return -ENOMEM;
+
+               __ethtool_get_strings(dev, gstrings.string_set, data);
+       } else {
+               data = NULL;
+       }
 
        ret = -EFAULT;
        if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
@@ -1897,11 +1902,15 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
                return -EFAULT;
 
        stats.n_stats = n_stats;
-       data = vzalloc(array_size(n_stats, sizeof(u64)));
-       if (n_stats && !data)
-               return -ENOMEM;
 
-       ops->get_ethtool_stats(dev, &stats, data);
+       if (n_stats) {
+               data = vzalloc(array_size(n_stats, sizeof(u64)));
+               if (!data)
+                       return -ENOMEM;
+               ops->get_ethtool_stats(dev, &stats, data);
+       } else {
+               data = NULL;
+       }
 
        ret = -EFAULT;
        if (copy_to_user(useraddr, &stats, sizeof(stats)))
@@ -1941,16 +1950,21 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
                return -EFAULT;
 
        stats.n_stats = n_stats;
-       data = vzalloc(array_size(n_stats, sizeof(u64)));
-       if (n_stats && !data)
-               return -ENOMEM;
 
-       if (dev->phydev && !ops->get_ethtool_phy_stats) {
-               ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
-               if (ret < 0)
-                       return ret;
+       if (n_stats) {
+               data = vzalloc(array_size(n_stats, sizeof(u64)));
+               if (!data)
+                       return -ENOMEM;
+
+               if (dev->phydev && !ops->get_ethtool_phy_stats) {
+                       ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
+                       if (ret < 0)
+                               goto out;
+               } else {
+                       ops->get_ethtool_phy_stats(dev, &stats, data);
+               }
        } else {
-               ops->get_ethtool_phy_stats(dev, &stats, data);
+               data = NULL;
        }
 
        ret = -EFAULT;
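
All three ethtool hunks converge on the same shape: allocate and invoke the fill callback only when the element count is non-zero, and hand userspace an explicit NULL otherwise, so the driver callback never sees a zero-length buffer. A hedged userspace sketch of that allocation pattern:

    #include <stdlib.h>
    #include <stdio.h>

    static void fill(double *data, size_t n)    /* ~ get_ethtool_stats() */
    {
        for (size_t i = 0; i < n; i++)
            data[i] = (double)i;
    }

    static int get_stats(size_t n_stats, double **out)
    {
        double *data = NULL;

        if (n_stats) {
            data = calloc(n_stats, sizeof(*data));
            if (!data)
                return -1;                      /* ~ -ENOMEM */
            fill(data, n_stats);                /* callback never sees NULL */
        }
        *out = data;                            /* NULL means "no stats" */
        return 0;
    }

    int main(void)
    {
        double *d = NULL;

        if (get_stats(0, &d) == 0 && d == NULL)
            printf("zero stats handled without touching the callback\n");
        free(d);
        return 0;
    }
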
index 4a92a98ccce9a0570cdc75c66180db2f7305073f..b5cd3c727285d7a1738118c246abce8d31dac08f 100644 (file)
@@ -80,14 +80,14 @@ static int failover_slave_register(struct net_device *slave_dev)
                goto err_upper_link;
        }
 
-       slave_dev->priv_flags |= IFF_FAILOVER_SLAVE;
+       slave_dev->priv_flags |= (IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
 
        if (fops && fops->slave_register &&
            !fops->slave_register(slave_dev, failover_dev))
                return NOTIFY_OK;
 
        netdev_upper_dev_unlink(slave_dev, failover_dev);
-       slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
+       slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
 err_upper_link:
        netdev_rx_handler_unregister(slave_dev);
 done:
@@ -121,7 +121,7 @@ int failover_slave_unregister(struct net_device *slave_dev)
 
        netdev_rx_handler_unregister(slave_dev);
        netdev_upper_dev_unlink(slave_dev, failover_dev);
-       slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
+       slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
 
        if (fops && fops->slave_unregister &&
            !fops->slave_unregister(slave_dev, failover_dev))
index f274620945ff06085beaf411d7d6f9912ec4ba66..27e61ffd903931c45f0a3f2f6e436937058dfb39 100644 (file)
@@ -1796,8 +1796,6 @@ static const struct bpf_func_proto bpf_skb_pull_data_proto = {
 
 BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
 {
-       sk = sk_to_full_sk(sk);
-
        return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
 }
 
@@ -4385,6 +4383,8 @@ BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
         * Only binding to IP is supported.
         */
        err = -EINVAL;
+       if (addr_len < offsetofend(struct sockaddr, sa_family))
+               return err;
        if (addr->sa_family == AF_INET) {
                if (addr_len < sizeof(struct sockaddr_in))
                        return err;
@@ -5266,7 +5266,7 @@ static const struct bpf_func_proto bpf_sk_release_proto = {
        .func           = bpf_sk_release,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
-       .arg1_type      = ARG_PTR_TO_SOCKET,
+       .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
 };
 
 BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
@@ -5407,8 +5407,6 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
 
 BPF_CALL_1(bpf_tcp_sock, struct sock *, sk)
 {
-       sk = sk_to_full_sk(sk);
-
        if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
                return (unsigned long)sk;
 
@@ -5422,6 +5420,23 @@ static const struct bpf_func_proto bpf_tcp_sock_proto = {
        .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
 };
 
+BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk)
+{
+       sk = sk_to_full_sk(sk);
+
+       if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE))
+               return (unsigned long)sk;
+
+       return (unsigned long)NULL;
+}
+
+static const struct bpf_func_proto bpf_get_listener_sock_proto = {
+       .func           = bpf_get_listener_sock,
+       .gpl_only       = false,
+       .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
+       .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
+};
+
 BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
 {
        unsigned int iphdr_len;
@@ -5607,6 +5622,8 @@ cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 #ifdef CONFIG_INET
        case BPF_FUNC_tcp_sock:
                return &bpf_tcp_sock_proto;
+       case BPF_FUNC_get_listener_sock:
+               return &bpf_get_listener_sock_proto;
        case BPF_FUNC_skb_ecn_set_ce:
                return &bpf_skb_ecn_set_ce_proto;
 #endif
@@ -5702,6 +5719,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_sk_release_proto;
        case BPF_FUNC_tcp_sock:
                return &bpf_tcp_sock_proto;
+       case BPF_FUNC_get_listener_sock:
+               return &bpf_get_listener_sock_proto;
 #endif
        default:
                return bpf_base_func_proto(func_id);
@@ -6596,14 +6615,8 @@ static bool flow_dissector_is_valid_access(int off, int size,
                                           const struct bpf_prog *prog,
                                           struct bpf_insn_access_aux *info)
 {
-       if (type == BPF_WRITE) {
-               switch (off) {
-               case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
-                       break;
-               default:
-                       return false;
-               }
-       }
+       if (type == BPF_WRITE)
+               return false;
 
        switch (off) {
        case bpf_ctx_range(struct __sk_buff, data):
@@ -6615,11 +6628,7 @@ static bool flow_dissector_is_valid_access(int off, int size,
        case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
                info->reg_type = PTR_TO_FLOW_KEYS;
                break;
-       case bpf_ctx_range(struct __sk_buff, tc_classid):
-       case bpf_ctx_range(struct __sk_buff, data_meta):
-       case bpf_ctx_range_till(struct __sk_buff, family, local_port):
-       case bpf_ctx_range(struct __sk_buff, tstamp):
-       case bpf_ctx_range(struct __sk_buff, wire_len):
+       default:
                return false;
        }
 
index bb1a54747d64811a5545a78243f8cf021a4adf46..94a450b2191a9e25ca79534f9caa68a442daa80c 100644 (file)
@@ -707,6 +707,7 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
        /* Pass parameters to the BPF program */
        memset(flow_keys, 0, sizeof(*flow_keys));
        cb->qdisc_cb.flow_keys = flow_keys;
+       flow_keys->n_proto = skb->protocol;
        flow_keys->nhoff = skb_network_offset(skb);
        flow_keys->thoff = flow_keys->nhoff;
 
@@ -716,7 +717,8 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
        /* Restore state */
        memcpy(cb, &cb_saved, sizeof(cb_saved));
 
-       flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, 0, skb->len);
+       flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff,
+                                  skb_network_offset(skb), skb->len);
        flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
                                   flow_keys->nhoff, skb->len);
 
index 4ff661f6f989ae10ca49a1e81c825be56683d026..8f8b7b6c2945a75406c15e5faac61759a02db717 100644 (file)
@@ -928,6 +928,8 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
        if (error)
                return error;
 
+       dev_hold(queue->dev);
+
        if (dev->sysfs_rx_queue_group) {
                error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
                if (error) {
@@ -937,7 +939,6 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
        }
 
        kobject_uevent(kobj, KOBJ_ADD);
-       dev_hold(queue->dev);
 
        return error;
 }
@@ -1464,6 +1465,8 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
        if (error)
                return error;
 
+       dev_hold(queue->dev);
+
 #ifdef CONFIG_BQL
        error = sysfs_create_group(kobj, &dql_group);
        if (error) {
@@ -1473,7 +1476,6 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
 #endif
 
        kobject_uevent(kobj, KOBJ_ADD);
-       dev_hold(queue->dev);
 
        return 0;
 }
index 17f36317363d19dcdeb6a6e75a116220b078c2b0..7e6dcc6257011d8b60e132e97a0db229c39d1daf 100644 (file)
@@ -304,6 +304,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
 
        refcount_set(&net->count, 1);
        refcount_set(&net->passive, 1);
+       get_random_bytes(&net->hash_mix, sizeof(u32));
        net->dev_base_seq = 1;
        net->user_ns = user_ns;
        idr_init(&net->netns_ids);
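
The one-line setup_net() change seeds a per-namespace hash_mix so that identical keys land in different buckets in different namespaces, blunting cross-namespace hash-collision attacks. A toy userspace illustration of per-instance hash perturbation (rand() stands in for get_random_bytes(); the mixing function is made up, not the kernel's):

    #include <stdlib.h>
    #include <stdio.h>
    #include <stdint.h>
    #include <time.h>

    struct ns { uint32_t hash_mix; };

    static uint32_t bucket_of(const struct ns *ns, uint32_t key,
                              uint32_t nbuckets)
    {
        uint32_t h = key ^ ns->hash_mix;    /* per-instance perturbation */

        h *= 0x9e3779b1u;                   /* cheap integer mix */
        return h % nbuckets;
    }

    int main(void)
    {
        struct ns a, b;

        srand((unsigned)time(NULL));
        a.hash_mix = (uint32_t)rand();      /* ~ get_random_bytes() */
        b.hash_mix = (uint32_t)rand();
        printf("key 42 -> bucket %u in ns A, %u in ns B\n",
               bucket_of(&a, 42, 256), bucket_of(&b, 42, 256));
        return 0;
    }
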
index 703cf76aa7c2dee7c5b556f5f035c015780f55f0..7109c168b5e0fb20b8b6ad8951893b181803fad8 100644 (file)
@@ -185,9 +185,10 @@ void __init ptp_classifier_init(void)
                { 0x16,  0,  0, 0x00000000 },
                { 0x06,  0,  0, 0x00000000 },
        };
-       struct sock_fprog_kern ptp_prog = {
-               .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
-       };
+       struct sock_fprog_kern ptp_prog;
+
+       ptp_prog.len = ARRAY_SIZE(ptp_filter);
+       ptp_prog.filter = ptp_filter;
 
        BUG_ON(bpf_prog_create(&ptp_insns, &ptp_prog));
 }
index a51cab95ba64c7d76a2ba0940c67e9f6e53f54e1..220c56e936592495656962050d285bb1c0024b37 100644 (file)
@@ -4948,7 +4948,7 @@ static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
 {
        struct if_stats_msg *ifsm;
 
-       if (nlh->nlmsg_len < sizeof(*ifsm)) {
+       if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
                NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
                return -EINVAL;
        }
index 2415d9cb9b89fefb30a7932a70c3497aeb67c80e..40796b8bf820450f5d0cce38986bd29137e2fd05 100644 (file)
@@ -3801,7 +3801,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
        unsigned int delta_truesize;
        struct sk_buff *lp;
 
-       if (unlikely(p->len + len >= 65536))
+       if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
                return -E2BIG;
 
        lp = NAPI_GRO_CB(p)->last;
@@ -5083,7 +5083,8 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
 
 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
 {
-       int mac_len;
+       int mac_len, meta_len;
+       void *meta;
 
        if (skb_cow(skb, skb_headroom(skb)) < 0) {
                kfree_skb(skb);
@@ -5095,6 +5096,13 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
                memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
                        mac_len - VLAN_HLEN - ETH_TLEN);
        }
+
+       meta_len = skb_metadata_len(skb);
+       if (meta_len) {
+               meta = skb_metadata_end(skb) - meta_len;
+               memmove(meta + VLAN_HLEN, meta, meta_len);
+       }
+
        skb->mac_header += VLAN_HLEN;
        return skb;
 }
index 782343bb925b643348cc906a70b97caa0388178d..067878a1e4c51363e065e13ccdb2b9d03c6a9c5f 100644 (file)
@@ -348,7 +348,7 @@ static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
                tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
        }
 
-       if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
+       if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
                struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
                *(struct old_timeval32 *)optval = tv32;
                return sizeof(tv32);
@@ -372,7 +372,7 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool
 {
        struct __kernel_sock_timeval tv;
 
-       if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
+       if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
                struct old_timeval32 tv32;
 
                if (optlen < sizeof(tv32))
index f227f002c73d382fecd98c8857ce4c9139cb7a8a..db87d9f5801983913e66549e5d5911ead10f3ac1 100644 (file)
@@ -738,7 +738,12 @@ static int __feat_register_sp(struct list_head *fn, u8 feat, u8 is_local,
        if (dccp_feat_clone_sp_val(&fval, sp_val, sp_len))
                return -ENOMEM;
 
-       return dccp_feat_push_change(fn, feat, is_local, mandatory, &fval);
+       if (dccp_feat_push_change(fn, feat, is_local, mandatory, &fval)) {
+               kfree(fval.sp.vec);
+               return -ENOMEM;
+       }
+
+       return 0;
 }
 
 /**
index d5740bad5b1811cd42e44fb3b0da6edabbf18095..57d84e9b7b6fc820a4616e3ac326633f14e2fd95 100644 (file)
@@ -436,8 +436,8 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
                newnp->ipv6_mc_list = NULL;
                newnp->ipv6_ac_list = NULL;
                newnp->ipv6_fl_list = NULL;
-               newnp->mcast_oif   = inet6_iif(skb);
-               newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
+               newnp->mcast_oif   = inet_iif(skb);
+               newnp->mcast_hops  = ip_hdr(skb)->ttl;
 
                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
index ed4f6dc26365baa3e9988b2f11ac26d8ffeb55b7..85c22ada47449d580ee2a175c0729f4bad2cad61 100644 (file)
@@ -98,8 +98,18 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
        return skb;
 }
 
+static int qca_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
+                                int *offset)
+{
+       *offset = QCA_HDR_LEN;
+       *proto = ((__be16 *)skb->data)[0];
+
+       return 0;
+}
+
 const struct dsa_device_ops qca_netdev_ops = {
        .xmit   = qca_tag_xmit,
        .rcv    = qca_tag_rcv,
+       .flow_dissect = qca_tag_flow_dissect,
        .overhead = QCA_HDR_LEN,
 };
index 10e809b296ec8644e108923c6faa1e4e2179bc20..fb065a8937ea28788f43a3906cbe08e3c0fc8c0d 100644 (file)
@@ -226,7 +226,7 @@ static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
        tail[plen - 1] = proto;
 }
 
-static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
+static int esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 {
        int encap_type;
        struct udphdr *uh;
@@ -234,6 +234,7 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
        __be16 sport, dport;
        struct xfrm_encap_tmpl *encap = x->encap;
        struct ip_esp_hdr *esph = esp->esph;
+       unsigned int len;
 
        spin_lock_bh(&x->lock);
        sport = encap->encap_sport;
@@ -241,11 +242,14 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
        encap_type = encap->encap_type;
        spin_unlock_bh(&x->lock);
 
+       len = skb->len + esp->tailen - skb_transport_offset(skb);
+       if (len + sizeof(struct iphdr) >= IP_MAX_MTU)
+               return -EMSGSIZE;
+
        uh = (struct udphdr *)esph;
        uh->source = sport;
        uh->dest = dport;
-       uh->len = htons(skb->len + esp->tailen
-                 - skb_transport_offset(skb));
+       uh->len = htons(len);
        uh->check = 0;
 
        switch (encap_type) {
@@ -262,6 +266,8 @@ static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, stru
 
        *skb_mac_header(skb) = IPPROTO_UDP;
        esp->esph = esph;
+
+       return 0;
 }
 
 int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
@@ -275,8 +281,12 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
        int tailen = esp->tailen;
 
        /* this is non-NULL only with UDP Encapsulation */
-       if (x->encap)
-               esp_output_udp_encap(x, skb, esp);
+       if (x->encap) {
+               int err = esp_output_udp_encap(x, skb, esp);
+
+               if (err < 0)
+                       return err;
+       }
 
        if (!skb_cloned(skb)) {
                if (tailen <= skb_tailroom(skb)) {
index 8756e0e790d2a94a5b4a587c3bc3de0673baf2c4..d3170a8001b2a15bffe5a37ab9e0b556663454a4 100644 (file)
@@ -52,13 +52,13 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
                        goto out;
 
                if (sp->len == XFRM_MAX_DEPTH)
-                       goto out;
+                       goto out_reset;
 
                x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
                                      (xfrm_address_t *)&ip_hdr(skb)->daddr,
                                      spi, IPPROTO_ESP, AF_INET);
                if (!x)
-                       goto out;
+                       goto out_reset;
 
                sp->xvec[sp->len++] = x;
                sp->olen++;
@@ -66,7 +66,7 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
                xo = xfrm_offload(skb);
                if (!xo) {
                        xfrm_state_put(x);
-                       goto out;
+                       goto out_reset;
                }
        }
 
@@ -82,6 +82,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
        xfrm_input(skb, IPPROTO_ESP, spi, -2);
 
        return ERR_PTR(-EINPROGRESS);
+out_reset:
+       secpath_reset(skb);
 out:
        skb_push(skb, offset);
        NAPI_GRO_CB(skb)->same_flow = 0;
index 79e98e21cdd7f971694356065afb3f68fb34c1a0..12ce6c526d72bd15a16a9415ed992c25039d415a 100644 (file)
@@ -121,6 +121,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
        struct guehdr *guehdr;
        void *data;
        u16 doffset = 0;
+       u8 proto_ctype;
 
        if (!fou)
                return 1;
@@ -212,13 +213,14 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
        if (unlikely(guehdr->control))
                return gue_control_message(skb, guehdr);
 
+       proto_ctype = guehdr->proto_ctype;
        __skb_pull(skb, sizeof(struct udphdr) + hdrlen);
        skb_reset_transport_header(skb);
 
        if (iptunnel_pull_offloads(skb))
                goto drop;
 
-       return -guehdr->proto_ctype;
+       return -proto_ctype;
 
 drop:
        kfree_skb(skb);
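
The gue_udp_recv() fix copies guehdr->proto_ctype into a local before __skb_pull(), since pulling may invalidate pointers into the old header area. The same save-before-invalidate rule in a hedged userspace form:

    #include <stdlib.h>
    #include <stdio.h>

    struct hdr { unsigned char proto_ctype; };

    int main(void)
    {
        char *buf = malloc(64);
        struct hdr *h;
        unsigned char proto;
        char *tmp;

        if (!buf)
            return 1;
        h = (struct hdr *)buf;
        h->proto_ctype = 17;

        proto = h->proto_ctype;   /* save first ...                      */
        tmp = realloc(buf, 16);   /* ... then shrink: h may now be stale */
        if (tmp)
            buf = tmp;
        h = NULL;                 /* never dereference the old pointer   */

        printf("demux on proto %u\n", proto);
        free(buf);
        return 0;
    }
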
index fd219f7bd3ea2c9263ac6d21ed3a66fd6442496c..4b052644147630fbfa8075ee623714ff5013bf94 100644 (file)
@@ -259,7 +259,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
        struct net *net = dev_net(skb->dev);
        struct metadata_dst *tun_dst = NULL;
        struct erspan_base_hdr *ershdr;
-       struct erspan_metadata *pkt_md;
        struct ip_tunnel_net *itn;
        struct ip_tunnel *tunnel;
        const struct iphdr *iph;
@@ -282,9 +281,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                if (unlikely(!pskb_may_pull(skb, len)))
                        return PACKET_REJECT;
 
-               ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
-               pkt_md = (struct erspan_metadata *)(ershdr + 1);
-
                if (__iptunnel_pull_header(skb,
                                           len,
                                           htons(ETH_P_TEB),
@@ -292,8 +288,9 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                        goto drop;
 
                if (tunnel->collect_md) {
+                       struct erspan_metadata *pkt_md, *md;
                        struct ip_tunnel_info *info;
-                       struct erspan_metadata *md;
+                       unsigned char *gh;
                        __be64 tun_id;
                        __be16 flags;
 
@@ -306,6 +303,14 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                        if (!tun_dst)
                                return PACKET_REJECT;
 
+                       /* skb can be uncloned in __iptunnel_pull_header, so
+                        * old pkt_md is no longer valid and we need to reset
+                        * it
+                        */
+                       gh = skb_network_header(skb) +
+                            skb_network_header_len(skb);
+                       pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
+                                                           sizeof(*ershdr));
                        md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
                        md->version = ver;
                        md2 = &md->u.md2;
index ecce2dc78f17eb48d91f8f6638ef0a4e8076fedf..1132d6d1796a4f7c947da76b9b39e7fbe11d3399 100644 (file)
@@ -257,11 +257,10 @@ int ip_local_deliver(struct sk_buff *skb)
                       ip_local_deliver_finish);
 }
 
-static inline bool ip_rcv_options(struct sk_buff *skb)
+static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
 {
        struct ip_options *opt;
        const struct iphdr *iph;
-       struct net_device *dev = skb->dev;
 
        /* It looks like overkill, because not all
           IP options require packet mangling.
@@ -297,7 +296,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
                        }
                }
 
-               if (ip_options_rcv_srr(skb))
+               if (ip_options_rcv_srr(skb, dev))
                        goto drop;
        }
 
@@ -353,7 +352,7 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
        }
 #endif
 
-       if (iph->ihl > 5 && ip_rcv_options(skb))
+       if (iph->ihl > 5 && ip_rcv_options(skb, dev))
                goto drop;
 
        rt = skb_rtable(skb);
index 32a35043c9f590314b7fa354d5e948b59e665214..3db31bb9df50622f8c9ae961f4eabc566d1cb74a 100644 (file)
@@ -612,7 +612,7 @@ void ip_forward_options(struct sk_buff *skb)
        }
 }
 
-int ip_options_rcv_srr(struct sk_buff *skb)
+int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
 {
        struct ip_options *opt = &(IPCB(skb)->opt);
        int srrspace, srrptr;
@@ -647,7 +647,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
 
                orefdst = skb->_skb_refdst;
                skb_dst_set(skb, NULL);
-               err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
+               err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
                rt2 = skb_rtable(skb);
                if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
                        skb_dst_drop(skb);
index c80188875f39238f8d8ff33603cacf279d3f903a..e8bb2e85c5a471f018193b8a08735606ec7f0526 100644 (file)
@@ -519,6 +519,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
        to->pkt_type = from->pkt_type;
        to->priority = from->priority;
        to->protocol = from->protocol;
+       to->skb_iif = from->skb_iif;
        skb_dst_drop(to);
        skb_dst_copy(to, from);
        to->dev = from->dev;
index 68a21bf75dd0bb860ee61b63b66c8712ffbf9985..35d8346742e2cc2bd7dd242501870a7681da0f96 100644 (file)
@@ -646,10 +646,8 @@ static int __init vti_init(void)
 
        msg = "ipip tunnel";
        err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
-       if (err < 0) {
-               pr_info("%s: cant't register tunnel\n",__func__);
+       if (err < 0)
                goto xfrm_tunnel_failed;
-       }
 
        msg = "netlink interface";
        err = rtnl_link_register(&vti_link_ops);
@@ -659,9 +657,9 @@ static int __init vti_init(void)
        return err;
 
 rtnl_link_failed:
-       xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
-xfrm_tunnel_failed:
        xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
+xfrm_tunnel_failed:
+       xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
 xfrm_proto_comp_failed:
        xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
 xfrm_proto_ah_failed:
@@ -676,6 +674,7 @@ pernet_dev_failed:
 static void __exit vti_fini(void)
 {
        rtnl_link_unregister(&vti_link_ops);
+       xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
        xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
        xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
        xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
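
Both vti hunks restore symmetry: the init error ladder must undo registrations in exact reverse order, and vti_fini() must deregister everything vti_init() registered. A compact userspace sketch of the goto-unwind idiom (the registration functions are placeholders):

    #include <stdio.h>

    static int reg_a(void)    { puts("register a");   return 0; }
    static int reg_b(void)    { puts("register b");   return 0; }
    static int reg_c(void)    { puts("register c");   return -1; } /* fails */
    static void unreg_a(void) { puts("deregister a"); }
    static void unreg_b(void) { puts("deregister b"); }

    static int init(void)
    {
        int err;

        err = reg_a();
        if (err)
            goto out;
        err = reg_b();
        if (err)
            goto undo_a;
        err = reg_c();
        if (err)
            goto undo_b;            /* c failed: undo b, then a */
        return 0;

    undo_b:
        unreg_b();
    undo_a:
        unreg_a();
    out:
        return err;
    }

    int main(void)
    {
        init();                     /* prints the reverse-order unwind */
        return 0;
    }
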
index 835d50b279f56d23753d9ca3e93985055f97857e..a2a88ab07f7be0ab29ff75fe5725e391d27a2aca 100644 (file)
@@ -56,7 +56,7 @@ struct clusterip_config {
 #endif
        enum clusterip_hashmode hash_mode;      /* which hashing mode */
        u_int32_t hash_initval;                 /* hash initialization */
-       struct rcu_head rcu;                    /* for call_rcu_bh */
+       struct rcu_head rcu;                    /* for call_rcu */
        struct net *net;                        /* netns for pernet list */
        char ifname[IFNAMSIZ];                  /* device ifname */
 };
index a5da63e5faa2d8118d3044a5a79b5e51bf61cafc..6fdf1c195d8e3a0e32af0359794f798457a21cb3 100644 (file)
@@ -1183,11 +1183,39 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
        return dst;
 }
 
+static void ipv4_send_dest_unreach(struct sk_buff *skb)
+{
+       struct ip_options opt;
+       int res;
+
+       /* Recompile ip options since IPCB may not be valid anymore.
+        * Also check we have a reasonable ipv4 header.
+        */
+       if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
+           ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
+               return;
+
+       memset(&opt, 0, sizeof(opt));
+       if (ip_hdr(skb)->ihl > 5) {
+               if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
+                       return;
+               opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
+
+               rcu_read_lock();
+               res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
+               rcu_read_unlock();
+
+               if (res)
+                       return;
+       }
+       __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
+}
+
 static void ipv4_link_failure(struct sk_buff *skb)
 {
        struct rtable *rt;
 
-       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+       ipv4_send_dest_unreach(skb);
 
        rt = skb_rtable(skb);
        if (rt)
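
ipv4_send_dest_unreach() re-validates the header before recompiling options, because the IPCB state may be stale by the time the link-failure path runs. A hedged userspace version of the same sanity checks on a raw buffer:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    #define IPV4_MIN_IHL 5          /* header words without options */

    static int ipv4_options_len(const uint8_t *pkt, size_t len)
    {
        if (len < 20)
            return -1;                          /* no room for a base header */

        uint8_t version = pkt[0] >> 4;
        uint8_t ihl     = pkt[0] & 0x0f;

        if (version != 4 || ihl < IPV4_MIN_IHL)
            return -1;                          /* not a sane IPv4 header */
        if (len < (size_t)ihl * 4)
            return -1;                          /* options truncated */
        return (ihl - IPV4_MIN_IHL) * 4;        /* bytes of options present */
    }

    int main(void)
    {
        uint8_t pkt[24] = { 0x46 };             /* version 4, ihl 6 */

        printf("options: %d bytes\n", ipv4_options_len(pkt, sizeof(pkt)));
        return 0;
    }
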
index ba0fc4b1846561559ac995a444992f98a3187894..eeb4041fa5f905fb0f7c91ea6d74851ae97259f8 100644 (file)
@@ -49,6 +49,7 @@ static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 static int comp_sack_nr_max = 255;
 static u32 u32_max_div_HZ = UINT_MAX / HZ;
+static int one_day_secs = 24 * 3600;
 
 /* obsolete */
 static int sysctl_tcp_low_latency __read_mostly;
@@ -1151,7 +1152,9 @@ static struct ctl_table ipv4_net_table[] = {
                .data           = &init_net.ipv4.sysctl_tcp_min_rtt_wlen,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &one_day_secs
        },
        {
                .procname       = "tcp_autocorking",
index cd4814f7e96223447195f0d0ac224c54d5501d2e..477cb4aa456c11c70185a982cbadafba857d3619 100644 (file)
@@ -49,9 +49,8 @@
 #define DCTCP_MAX_ALPHA        1024U
 
 struct dctcp {
-       u32 acked_bytes_ecn;
-       u32 acked_bytes_total;
-       u32 prior_snd_una;
+       u32 old_delivered;
+       u32 old_delivered_ce;
        u32 prior_rcv_nxt;
        u32 dctcp_alpha;
        u32 next_seq;
@@ -67,19 +66,14 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
 module_param(dctcp_alpha_on_init, uint, 0644);
 MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
 
-static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
-module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
-MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
-                "parameter for clamping alpha on loss");
-
 static struct tcp_congestion_ops dctcp_reno;
 
 static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
 {
        ca->next_seq = tp->snd_nxt;
 
-       ca->acked_bytes_ecn = 0;
-       ca->acked_bytes_total = 0;
+       ca->old_delivered = tp->delivered;
+       ca->old_delivered_ce = tp->delivered_ce;
 }
 
 static void dctcp_init(struct sock *sk)
@@ -91,7 +85,6 @@ static void dctcp_init(struct sock *sk)
             sk->sk_state == TCP_CLOSE)) {
                struct dctcp *ca = inet_csk_ca(sk);
 
-               ca->prior_snd_una = tp->snd_una;
                ca->prior_rcv_nxt = tp->rcv_nxt;
 
                ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
@@ -123,37 +116,25 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        struct dctcp *ca = inet_csk_ca(sk);
-       u32 acked_bytes = tp->snd_una - ca->prior_snd_una;
-
-       /* If ack did not advance snd_una, count dupack as MSS size.
-        * If ack did update window, do not count it at all.
-        */
-       if (acked_bytes == 0 && !(flags & CA_ACK_WIN_UPDATE))
-               acked_bytes = inet_csk(sk)->icsk_ack.rcv_mss;
-       if (acked_bytes) {
-               ca->acked_bytes_total += acked_bytes;
-               ca->prior_snd_una = tp->snd_una;
-
-               if (flags & CA_ACK_ECE)
-                       ca->acked_bytes_ecn += acked_bytes;
-       }
 
        /* Expired RTT */
        if (!before(tp->snd_una, ca->next_seq)) {
-               u64 bytes_ecn = ca->acked_bytes_ecn;
+               u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce;
                u32 alpha = ca->dctcp_alpha;
 
                /* alpha = (1 - g) * alpha + g * F */
 
                alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
-               if (bytes_ecn) {
+               if (delivered_ce) {
+                       u32 delivered = tp->delivered - ca->old_delivered;
+
                        /* If dctcp_shift_g == 1, a 32bit value would overflow
-                        * after 8 Mbytes.
+                        * after 8 M packets.
                         */
-                       bytes_ecn <<= (10 - dctcp_shift_g);
-                       do_div(bytes_ecn, max(1U, ca->acked_bytes_total));
+                       delivered_ce <<= (10 - dctcp_shift_g);
+                       delivered_ce /= max(1U, delivered);
 
-                       alpha = min(alpha + (u32)bytes_ecn, DCTCP_MAX_ALPHA);
+                       alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA);
                }
                /* dctcp_alpha can be read from dctcp_get_info() without
                 * synchro, so we ask compiler to not use dctcp_alpha
@@ -164,21 +145,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
        }
 }
 
-static void dctcp_state(struct sock *sk, u8 new_state)
+static void dctcp_react_to_loss(struct sock *sk)
 {
-       if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
-               struct dctcp *ca = inet_csk_ca(sk);
+       struct dctcp *ca = inet_csk_ca(sk);
+       struct tcp_sock *tp = tcp_sk(sk);
 
-               /* If this extension is enabled, we clamp dctcp_alpha to
-                * max on packet loss; the motivation is that dctcp_alpha
-                * is an indicator to the extend of congestion and packet
-                * loss is an indicator of extreme congestion; setting
-                * this in practice turned out to be beneficial, and
-                * effectively assumes total congestion which reduces the
-                * window by half.
-                */
-               ca->dctcp_alpha = DCTCP_MAX_ALPHA;
-       }
+       ca->loss_cwnd = tp->snd_cwnd;
+       tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
+}
+
+static void dctcp_state(struct sock *sk, u8 new_state)
+{
+       if (new_state == TCP_CA_Recovery &&
+           new_state != inet_csk(sk)->icsk_ca_state)
+               dctcp_react_to_loss(sk);
+       /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
+        * one loss-adjustment per RTT.
+        */
 }
 
 static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
@@ -190,6 +173,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
        case CA_EVENT_ECN_NO_CE:
                dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
                break;
+       case CA_EVENT_LOSS:
+               dctcp_react_to_loss(sk);
+               break;
        default:
                /* Don't care for the rest. */
                break;
@@ -200,6 +186,7 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
                             union tcp_cc_info *info)
 {
        const struct dctcp *ca = inet_csk_ca(sk);
+       const struct tcp_sock *tp = tcp_sk(sk);
 
        /* Fill it also in case of VEGASINFO due to req struct limits.
         * We can still correctly retrieve it later.
@@ -211,8 +198,10 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
                        info->dctcp.dctcp_enabled = 1;
                        info->dctcp.dctcp_ce_state = (u16) ca->ce_state;
                        info->dctcp.dctcp_alpha = ca->dctcp_alpha;
-                       info->dctcp.dctcp_ab_ecn = ca->acked_bytes_ecn;
-                       info->dctcp.dctcp_ab_tot = ca->acked_bytes_total;
+                       info->dctcp.dctcp_ab_ecn = tp->mss_cache *
+                                                  (tp->delivered_ce - ca->old_delivered_ce);
+                       info->dctcp.dctcp_ab_tot = tp->mss_cache *
+                                                  (tp->delivered - ca->old_delivered);
                }
 
                *attr = INET_DIAG_DCTCPINFO;
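
The rewritten DCTCP code estimates the CE fraction from the delivered/delivered_ce counters instead of acked byte counts, keeping the usual fixed-point update alpha = (1 - g) * alpha + g * F, with DCTCP_MAX_ALPHA = 1024 representing 1.0. A userspace transcription of that arithmetic (a sketch of the computation above, not the kernel module):

    #include <stdint.h>
    #include <stdio.h>

    #define DCTCP_MAX_ALPHA 1024u
    #define SHIFT_G         4u      /* g = 1/16, the kernel default */

    static uint32_t min_not_zero(uint32_t a, uint32_t b)
    {
        if (!a) return b;
        if (!b) return a;
        return a < b ? a : b;
    }

    static uint32_t dctcp_update(uint32_t alpha, uint32_t delivered,
                                 uint32_t delivered_ce)
    {
        /* decay: alpha -= alpha * g (small alphas round down to 0) */
        alpha -= min_not_zero(alpha, alpha >> SHIFT_G);
        if (delivered_ce) {
            /* F = ce/delivered, scaled so g * F lands in [0, 1024] */
            delivered_ce <<= (10 - SHIFT_G);
            delivered_ce /= (delivered ? delivered : 1);
            alpha += delivered_ce;
            if (alpha > DCTCP_MAX_ALPHA)
                alpha = DCTCP_MAX_ALPHA;
        }
        return alpha;
    }

    int main(void)
    {
        /* one RTT: 100 packets delivered, 25 of them CE-marked */
        printf("alpha: %u\n", dctcp_update(512, 100, 25));
        return 0;
    }
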
index 5def3c48870e17f42ac9424a6ee091ac4824dabc..731d3045b50a0fb9a89c887a154db9a3da8c7ddd 100644 (file)
@@ -402,11 +402,12 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
 static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       int room;
+
+       room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
 
        /* Check #1 */
-       if (tp->rcv_ssthresh < tp->window_clamp &&
-           (int)tp->rcv_ssthresh < tcp_space(sk) &&
-           !tcp_under_memory_pressure(sk)) {
+       if (room > 0 && !tcp_under_memory_pressure(sk)) {
                int incr;
 
                /* Check #2. Increase window, if skb with such overhead
@@ -419,8 +420,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 
                if (incr) {
                        incr = max_t(int, incr, 2 * skb->len);
-                       tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
-                                              tp->window_clamp);
+                       tp->rcv_ssthresh += min(room, incr);
                        inet_csk(sk)->icsk_ack.quick |= 1;
                }
        }
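
tcp_grow_window() now folds its two old bounds into one room value, room = min(window_clamp, tcp_space()) - rcv_ssthresh, and grows rcv_ssthresh by at most that room, so the threshold can never overshoot either bound. A one-function check of the invariant (illustrative names):

    #include <stdio.h>

    static int grow(int ssthresh, int clamp, int space, int incr)
    {
        int bound = clamp < space ? clamp : space;
        int room = bound - ssthresh;

        if (room > 0)
            ssthresh += room < incr ? room : incr;
        return ssthresh;            /* always <= min(clamp, space) */
    }

    int main(void)
    {
        printf("%d\n", grow(1000, 1200, 1500, 400));   /* capped at 1200 */
        return 0;
    }
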
index 277d71239d755d858be70663320d8de2ab23dfcc..a2896944aa377b7feef6417720348c02c3d8eecb 100644 (file)
@@ -1673,7 +1673,9 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
        if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
            TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
            ((TCP_SKB_CB(tail)->tcp_flags |
-             TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_URG) ||
+             TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
+           !((TCP_SKB_CB(tail)->tcp_flags &
+             TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
            ((TCP_SKB_CB(tail)->tcp_flags ^
              TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
 #ifdef CONFIG_TLS_DEVICE
@@ -1692,6 +1694,15 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
                if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
                        TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
 
+               /* We have to update both TCP_SKB_CB(tail)->tcp_flags and
+                * thtail->fin, so that the fast path in tcp_rcv_established()
+                * is not entered if we append a packet with a FIN.
+                * SYN, RST, URG are not present.
+                * ACK is set on both packets.
+                * PSH: we do not really care in the TCP stack,
+                *      at least for 'GRO' packets.
+                */
+               thtail->fin |= th->fin;
                TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
 
                if (TCP_SKB_CB(skb)->has_rxtstamp) {
@@ -2578,7 +2589,8 @@ static void __net_exit tcp_sk_exit(struct net *net)
 {
        int cpu;
 
-       module_put(net->ipv4.tcp_congestion_control->owner);
+       if (net->ipv4.tcp_congestion_control)
+               module_put(net->ipv4.tcp_congestion_control->owner);
 
        for_each_possible_cpu(cpu)
                inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
index 64f9715173ac8bf3a8d641ae40ef95f67aa7a7a0..065334b41d575aa0ba28de8487a6a5d018ec8804 100644 (file)
@@ -352,6 +352,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
        struct sk_buff *pp = NULL;
        struct udphdr *uh2;
        struct sk_buff *p;
+       unsigned int ulen;
 
        /* requires non zero csum, for symmetry with GSO */
        if (!uh->check) {
@@ -359,6 +360,12 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
                return NULL;
        }
 
+       /* Do not deal with padded or malicious packets, sorry! */
+       ulen = ntohs(uh->len);
+       if (ulen <= sizeof(*uh) || ulen != skb_gro_len(skb)) {
+               NAPI_GRO_CB(skb)->flush = 1;
+               return NULL;
+       }
        /* pull encapsulating udp header */
        skb_gro_pull(skb, sizeof(struct udphdr));
        skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
@@ -377,13 +384,14 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
 
                /* Terminate the flow on len mismatch or if it grows "too much".
                 * Under small packet flood GRO count could elsewhere grow a lot
-                * leading to execessive truesize values
+                * leading to excessive truesize values.
+                * On len mismatch merge the first packet shorter than gso_size,
+                * otherwise complete the GRO packet.
                 */
-               if (!skb_gro_receive(p, skb) &&
+               if (ulen > ntohs(uh2->len) || skb_gro_receive(p, skb) ||
+                   ulen != ntohs(uh2->len) ||
                    NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX)
                        pp = p;
-               else if (uh->len != uh2->len)
-                       pp = p;
 
                return pp;
        }
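
The reworked condition decides when a UDP GRO super-packet must be completed: a segment longer than the head terminates the flow immediately, a shorter one is merged as the final segment, and equal-length segments keep merging until the aggregate count limit. A hedged restatement of that decision as a standalone function:

    #include <stdbool.h>
    #include <stdio.h>

    #define GRO_CNT_MAX 64

    /* returns true when the flow should be flushed after this segment */
    static bool udp_gro_decide(unsigned head_len, unsigned seg_len,
                               unsigned count, bool merge_failed)
    {
        if (seg_len > head_len)
            return true;            /* bigger segment: complete now */
        if (merge_failed)
            return true;
        if (seg_len != head_len)
            return true;            /* shorter segment merged: last one */
        return count >= GRO_CNT_MAX;    /* avoid unbounded truesize */
    }

    int main(void)
    {
        printf("%d\n", udp_gro_decide(1400, 600, 3, false));  /* 1: flush */
        return 0;
    }
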
index d73a6d6652f60f8b81d47bb36766aa0d0329f3ce..2b144b92ae46a430d184fca120616166e8010f53 100644 (file)
@@ -111,7 +111,8 @@ static void
 _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 {
        const struct iphdr *iph = ip_hdr(skb);
-       u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
+       int ihl = iph->ihl;
+       u8 *xprth = skb_network_header(skb) + ihl * 4;
        struct flowi4 *fl4 = &fl->u.ip4;
        int oif = 0;
 
@@ -122,6 +123,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
        fl4->flowi4_mark = skb->mark;
        fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
 
+       fl4->flowi4_proto = iph->protocol;
+       fl4->daddr = reverse ? iph->saddr : iph->daddr;
+       fl4->saddr = reverse ? iph->daddr : iph->saddr;
+       fl4->flowi4_tos = iph->tos;
+
        if (!ip_is_fragment(iph)) {
                switch (iph->protocol) {
                case IPPROTO_UDP:
@@ -133,7 +139,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
                            pskb_may_pull(skb, xprth + 4 - skb->data)) {
                                __be16 *ports;
 
-                               xprth = skb_network_header(skb) + iph->ihl * 4;
+                               xprth = skb_network_header(skb) + ihl * 4;
                                ports = (__be16 *)xprth;
 
                                fl4->fl4_sport = ports[!!reverse];
@@ -146,7 +152,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
                            pskb_may_pull(skb, xprth + 2 - skb->data)) {
                                u8 *icmp;
 
-                               xprth = skb_network_header(skb) + iph->ihl * 4;
+                               xprth = skb_network_header(skb) + ihl * 4;
                                icmp = xprth;
 
                                fl4->fl4_icmp_type = icmp[0];
@@ -159,7 +165,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
                            pskb_may_pull(skb, xprth + 4 - skb->data)) {
                                __be32 *ehdr;
 
-                               xprth = skb_network_header(skb) + iph->ihl * 4;
+                               xprth = skb_network_header(skb) + ihl * 4;
                                ehdr = (__be32 *)xprth;
 
                                fl4->fl4_ipsec_spi = ehdr[0];
@@ -171,7 +177,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
                            pskb_may_pull(skb, xprth + 8 - skb->data)) {
                                __be32 *ah_hdr;
 
-                               xprth = skb_network_header(skb) + iph->ihl * 4;
+                               xprth = skb_network_header(skb) + ihl * 4;
                                ah_hdr = (__be32 *)xprth;
 
                                fl4->fl4_ipsec_spi = ah_hdr[1];
@@ -183,7 +189,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
                            pskb_may_pull(skb, xprth + 4 - skb->data)) {
                                __be16 *ipcomp_hdr;
 
-                               xprth = skb_network_header(skb) + iph->ihl * 4;
+                               xprth = skb_network_header(skb) + ihl * 4;
                                ipcomp_hdr = (__be16 *)xprth;
 
                                fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
@@ -196,7 +202,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
                                __be16 *greflags;
                                __be32 *gre_hdr;
 
-                               xprth = skb_network_header(skb) + iph->ihl * 4;
+                               xprth = skb_network_header(skb) + ihl * 4;
                                greflags = (__be16 *)xprth;
                                gre_hdr = (__be32 *)xprth;
 
@@ -213,10 +219,6 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
                        break;
                }
        }
-       fl4->flowi4_proto = iph->protocol;
-       fl4->daddr = reverse ? iph->saddr : iph->daddr;
-       fl4->saddr = reverse ? iph->daddr : iph->saddr;
-       fl4->flowi4_tos = iph->tos;
 }
 
 static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
index d43d076c98f5d30b058a49b5e477e76e9d97aec8..1766325423b5dad4d8c95c85605dc571248ba6d1 100644 (file)
@@ -476,7 +476,7 @@ static int ip6addrlbl_valid_dump_req(const struct nlmsghdr *nlh,
        }
 
        if (nlmsg_attrlen(nlh, sizeof(*ifal))) {
-               NL_SET_ERR_MSG_MOD(extack, "Invalid data after header for address label dump requewst");
+               NL_SET_ERR_MSG_MOD(extack, "Invalid data after header for address label dump request");
                return -EINVAL;
        }
 
index d46b4eb645c2e81993119b9a37405aa4b5eb82b3..cb99f6fb79b798702ecb87be006044fe06dda3c2 100644 (file)
@@ -74,13 +74,13 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
                        goto out;
 
                if (sp->len == XFRM_MAX_DEPTH)
-                       goto out;
+                       goto out_reset;
 
                x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
                                      (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
                                      spi, IPPROTO_ESP, AF_INET6);
                if (!x)
-                       goto out;
+                       goto out_reset;
 
                sp->xvec[sp->len++] = x;
                sp->olen++;
@@ -88,7 +88,7 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
                xo = xfrm_offload(skb);
                if (!xo) {
                        xfrm_state_put(x);
-                       goto out;
+                       goto out_reset;
                }
        }
 
@@ -109,6 +109,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
        xfrm_input(skb, IPPROTO_ESP, spi, -2);
 
        return ERR_PTR(-EINPROGRESS);
+out_reset:
+       secpath_reset(skb);
 out:
        skb_push(skb, offset);
        NAPI_GRO_CB(skb)->same_flow = 0;
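
The new `out_reset` label sits just above `out` and falls through into it, so a failure after the secpath has been set up unwinds that state and then shares the common error handling. A generic sketch of the layered-label idiom (helper names hypothetical):

        err = parse_header(skb);
        if (err)
                goto out;               /* nothing extra to undo yet */

        err = attach_secpath_state(skb);
        if (err)
                goto out_reset;         /* must also undo the secpath */

        return 0;

out_reset:
        secpath_reset(skb);             /* undo the deeper setup first */
out:
        return err;
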
index 79d2e43c05c5e792b1498c4bf5f73756252e2c7d..5fc1f4e0c0cf0d3dd403c2dcaf291ea9c096d235 100644 (file)
@@ -417,6 +417,7 @@ int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
 
 done:
        rhashtable_walk_stop(&iter);
+       rhashtable_walk_exit(&iter);
        return ret;
 }
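
rhashtable_walk_stop() only ends the RCU-protected part of a walk; the iterator stays registered with the table until rhashtable_walk_exit() runs, so omitting the exit (as the old code did) leaks the walker. The canonical iteration shape is roughly:

        struct rhashtable_iter iter;
        void *obj;

        rhashtable_walk_enter(&table, &iter);
        rhashtable_walk_start(&iter);

        while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(obj))
                        continue;       /* -EAGAIN: table resized, keep walking */
                process(obj);           /* hypothetical per-entry work */
        }

        rhashtable_walk_stop(&iter);    /* leave the RCU read-side section */
        rhashtable_walk_exit(&iter);    /* unregister the walker (the missing call) */
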
 
index 6613d8dbb0e5a5c3ba883c957e5bc4ba2bf00777..91247a6fc67ff7de1106d028b315a559e53e47f4 100644 (file)
@@ -921,9 +921,7 @@ static void fib6_drop_pcpu_from(struct fib6_info *f6i,
                if (pcpu_rt) {
                        struct fib6_info *from;
 
-                       from = rcu_dereference_protected(pcpu_rt->from,
-                                            lockdep_is_held(&table->tb6_lock));
-                       rcu_assign_pointer(pcpu_rt->from, NULL);
+                       from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
                        fib6_info_release(from);
                }
        }
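
Swapping the rcu_dereference_protected()/rcu_assign_pointer(..., NULL) pair for xchg() claims the RCU-managed pointer in a single atomic step, so clearing it no longer depends on holding tb6_lock; the same conversion shows up twice more in route.c below. The shape of the pattern:

        struct fib6_info *from;

        /* atomically fetch the old value and publish NULL in its place;
         * __force drops the __rcu address-space annotation for sparse
         */
        from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
        fib6_info_release(from);        /* the release helper tolerates NULL */
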
index cb54a8a3c2735221ec0ee1feaa63c28d3383b5cf..be5f3d7ceb966d609121f89a6cc5dcc605834c89 100644 (file)
@@ -94,15 +94,21 @@ static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
        return fl;
 }
 
+static void fl_free_rcu(struct rcu_head *head)
+{
+       struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);
+
+       if (fl->share == IPV6_FL_S_PROCESS)
+               put_pid(fl->owner.pid);
+       kfree(fl->opt);
+       kfree(fl);
+}
+
 
 static void fl_free(struct ip6_flowlabel *fl)
 {
-       if (fl) {
-               if (fl->share == IPV6_FL_S_PROCESS)
-                       put_pid(fl->owner.pid);
-               kfree(fl->opt);
-               kfree_rcu(fl, rcu);
-       }
+       if (fl)
+               call_rcu(&fl->rcu, fl_free_rcu);
 }
 
 static void fl_release(struct ip6_flowlabel *fl)
@@ -633,9 +639,9 @@ recheck:
                                if (fl1->share == IPV6_FL_S_EXCL ||
                                    fl1->share != fl->share ||
                                    ((fl1->share == IPV6_FL_S_PROCESS) &&
-                                    (fl1->owner.pid == fl->owner.pid)) ||
+                                    (fl1->owner.pid != fl->owner.pid)) ||
                                    ((fl1->share == IPV6_FL_S_USER) &&
-                                    uid_eq(fl1->owner.uid, fl->owner.uid)))
+                                    !uid_eq(fl1->owner.uid, fl->owner.uid)))
                                        goto release;
 
                                err = -ENOMEM;
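
kfree_rcu() can only free the object itself once the grace period elapses; here the pid reference and `fl->opt` must also be released only after all readers are done, which requires a full call_rcu() callback. (The second hunk in this file fixes inverted ownership tests: a PROCESS- or USER-shared label may be reused only by the same pid or uid, so it is the mismatch that must bail out.) A generic sketch of the callback form, with hypothetical types:

struct obj {
        struct rcu_head rcu;
        struct pid *owner;
        void *opt;
};

static void obj_free_rcu(struct rcu_head *head)
{
        struct obj *o = container_of(head, struct obj, rcu);

        put_pid(o->owner);      /* side effects kfree_rcu() cannot perform */
        kfree(o->opt);
        kfree(o);
}

static void obj_free(struct obj *o)
{
        if (o)
                call_rcu(&o->rcu, obj_free_rcu);
}
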
index b32c95f0212809006455cb79768f96bd1c516994..655e46b227f9eb99e43369ffb96a411bd662eadb 100644 (file)
@@ -525,10 +525,10 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
 }
 
 static int ip6erspan_rcv(struct sk_buff *skb,
-                        struct tnl_ptk_info *tpi)
+                        struct tnl_ptk_info *tpi,
+                        int gre_hdr_len)
 {
        struct erspan_base_hdr *ershdr;
-       struct erspan_metadata *pkt_md;
        const struct ipv6hdr *ipv6h;
        struct erspan_md2 *md2;
        struct ip6_tnl *tunnel;
@@ -547,18 +547,16 @@ static int ip6erspan_rcv(struct sk_buff *skb,
                if (unlikely(!pskb_may_pull(skb, len)))
                        return PACKET_REJECT;
 
-               ershdr = (struct erspan_base_hdr *)skb->data;
-               pkt_md = (struct erspan_metadata *)(ershdr + 1);
-
                if (__iptunnel_pull_header(skb, len,
                                           htons(ETH_P_TEB),
                                           false, false) < 0)
                        return PACKET_REJECT;
 
                if (tunnel->parms.collect_md) {
+                       struct erspan_metadata *pkt_md, *md;
                        struct metadata_dst *tun_dst;
                        struct ip_tunnel_info *info;
-                       struct erspan_metadata *md;
+                       unsigned char *gh;
                        __be64 tun_id;
                        __be16 flags;
 
@@ -571,6 +569,14 @@ static int ip6erspan_rcv(struct sk_buff *skb,
                        if (!tun_dst)
                                return PACKET_REJECT;
 
+                       /* skb can be uncloned in __iptunnel_pull_header, so
+                        * old pkt_md is no longer valid and we need to reset
+                        * it
+                        */
+                       gh = skb_network_header(skb) +
+                            skb_network_header_len(skb);
+                       pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
+                                                           sizeof(*ershdr));
                        info = &tun_dst->u.tun_info;
                        md = ip_tunnel_info_opts(info);
                        md->version = ver;
@@ -607,7 +613,7 @@ static int gre_rcv(struct sk_buff *skb)
 
        if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
                     tpi.proto == htons(ETH_P_ERSPAN2))) {
-               if (ip6erspan_rcv(skb, &tpi) == PACKET_RCVD)
+               if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
                        return 0;
                goto out;
        }
index edbd12067170bc77332d57a04c96812d9702520b..e51f3c648b094afe1d60a518db36a42444c4c55d 100644 (file)
@@ -601,7 +601,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                                inet6_sk(skb->sk) : NULL;
        struct ipv6hdr *tmp_hdr;
        struct frag_hdr *fh;
-       unsigned int mtu, hlen, left, len;
+       unsigned int mtu, hlen, left, len, nexthdr_offset;
        int hroom, troom;
        __be32 frag_id;
        int ptr, offset = 0, err = 0;
@@ -612,6 +612,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                goto fail;
        hlen = err;
        nexthdr = *prevhdr;
+       nexthdr_offset = prevhdr - skb_network_header(skb);
 
        mtu = ip6_skb_dst_mtu(skb);
 
@@ -646,6 +647,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
            (err = skb_checksum_help(skb)))
                goto fail;
 
+       prevhdr = skb_network_header(skb) + nexthdr_offset;
        hroom = LL_RESERVED_SPACE(rt->dst.dev);
        if (skb_has_frag_list(skb)) {
                unsigned int first_len = skb_pagelen(skb);
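
skb_checksum_help() may copy and reallocate the packet data, so the `prevhdr` pointer computed earlier can end up pointing into freed memory. Stashing it as an offset from the network header and rebuilding the pointer afterwards sidesteps the problem, the same hazard the xfrm4 hunk near the top avoids by caching scalars. Condensed:

        nexthdr_offset = prevhdr - skb_network_header(skb);     /* pointer -> offset */

        if (skb_checksum_help(skb))     /* may reallocate the skb data */
                goto fail;

        prevhdr = skb_network_header(skb) + nexthdr_offset;     /* fresh pointer */
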
index 0c6403cf8b5226fbe4bf2e4506b3816b30973b0b..ade1390c63488a60b405ca70052b3493fecc67d5 100644 (file)
@@ -627,7 +627,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
                                           eiph->daddr, eiph->saddr, 0, 0,
                                           IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
-               if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) {
+               if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
                        if (!IS_ERR(rt))
                                ip_rt_put(rt);
                        goto out;
@@ -636,7 +636,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        } else {
                if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
                                   skb2->dev) ||
-                   skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
+                   skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
                        goto out;
        }
 
index 1059894a6f4c3f009a92b30fb257e6b35f3a4a26..4cb83fb69844354d6d5d6ad18d552060e9e28b84 100644 (file)
@@ -210,6 +210,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
                psidoff = srhoff + sizeof(struct ipv6_sr_hdr) +
                          ((srh->segments_left + 1) * sizeof(struct in6_addr));
                psid = skb_header_pointer(skb, psidoff, sizeof(_psid), &_psid);
+               if (!psid)
+                       return false;
                if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_PSID,
                                ipv6_masked_addr_cmp(psid, &srhinfo->psid_msk,
                                                     &srhinfo->psid_addr)))
@@ -223,6 +225,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
                nsidoff = srhoff + sizeof(struct ipv6_sr_hdr) +
                          ((srh->segments_left - 1) * sizeof(struct in6_addr));
                nsid = skb_header_pointer(skb, nsidoff, sizeof(_nsid), &_nsid);
+               if (!nsid)
+                       return false;
                if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_NSID,
                                ipv6_masked_addr_cmp(nsid, &srhinfo->nsid_msk,
                                                     &srhinfo->nsid_addr)))
@@ -233,6 +237,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
        if (srhinfo->mt_flags & IP6T_SRH_LSID) {
                lsidoff = srhoff + sizeof(struct ipv6_sr_hdr);
                lsid = skb_header_pointer(skb, lsidoff, sizeof(_lsid), &_lsid);
+               if (!lsid)
+                       return false;
                if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LSID,
                                ipv6_masked_addr_cmp(lsid, &srhinfo->lsid_msk,
                                                     &srhinfo->lsid_addr)))
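
skb_header_pointer() returns NULL when the requested range runs past the end of the packet; otherwise it returns either a direct pointer into the skb or the caller's stack buffer with the bytes copied in. Every call therefore needs a NULL check before dereferencing, which is exactly what the three hunks above add. Typical usage:

        struct in6_addr _addr;
        const struct in6_addr *addr;

        addr = skb_header_pointer(skb, offset, sizeof(_addr), &_addr);
        if (!addr)              /* truncated packet: treat as no match */
                return false;
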
index 4ef4bbdb49d4b203974bf95de47534921baed3f6..0520aca3354b8e47d24e14c7ff2949892435f138 100644 (file)
@@ -379,11 +379,8 @@ static void ip6_dst_destroy(struct dst_entry *dst)
                in6_dev_put(idev);
        }
 
-       rcu_read_lock();
-       from = rcu_dereference(rt->from);
-       rcu_assign_pointer(rt->from, NULL);
+       from = xchg((__force struct fib6_info **)&rt->from, NULL);
        fib6_info_release(from);
-       rcu_read_unlock();
 }
 
 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -1040,14 +1037,20 @@ static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
        struct rt6_info *nrt;
 
        if (!fib6_info_hold_safe(rt))
-               return NULL;
+               goto fallback;
 
        nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
-       if (nrt)
-               ip6_rt_copy_init(nrt, rt);
-       else
+       if (!nrt) {
                fib6_info_release(rt);
+               goto fallback;
+       }
+
+       ip6_rt_copy_init(nrt, rt);
+       return nrt;
 
+fallback:
+       nrt = dev_net(dev)->ipv6.ip6_null_entry;
+       dst_hold(&nrt->dst);
        return nrt;
 }
 
@@ -1096,10 +1099,6 @@ restart:
                dst_hold(&rt->dst);
        } else {
                rt = ip6_create_rt_rcu(f6i);
-               if (!rt) {
-                       rt = net->ipv6.ip6_null_entry;
-                       dst_hold(&rt->dst);
-               }
        }
 
        rcu_read_unlock();
@@ -1286,9 +1285,7 @@ static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
        /* purge completely the exception to allow releasing the held resources:
         * some [sk] cache may keep the dst around for unlimited time
         */
-       from = rcu_dereference_protected(rt6_ex->rt6i->from,
-                                        lockdep_is_held(&rt6_exception_lock));
-       rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
+       from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
        fib6_info_release(from);
        dst_dev_put(&rt6_ex->rt6i->dst);
 
@@ -2328,6 +2325,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
 
                rcu_read_lock();
                from = rcu_dereference(rt6->from);
+               if (!from) {
+                       rcu_read_unlock();
+                       return;
+               }
                nrt6 = ip6_rt_cache_alloc(from, daddr, saddr);
                if (nrt6) {
                        rt6_do_update_pmtu(nrt6, mtu);
@@ -3391,11 +3392,8 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
 
        rcu_read_lock();
        from = rcu_dereference(rt->from);
-       /* This fib6_info_hold() is safe here because we hold reference to rt
-        * and rt already holds reference to fib6_info.
-        */
-       fib6_info_hold(from);
-       rcu_read_unlock();
+       if (!from)
+               goto out;
 
        nrt = ip6_rt_cache_alloc(from, &msg->dest, NULL);
        if (!nrt)
@@ -3407,10 +3405,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
 
        nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
 
-       /* No need to remove rt from the exception table if rt is
-        * a cached route because rt6_insert_exception() will
-        * takes care of it
-        */
+       /* rt6_insert_exception() will take care of duplicated exceptions */
        if (rt6_insert_exception(nrt, from)) {
                dst_release_immediate(&nrt->dst);
                goto out;
@@ -3423,7 +3418,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
        call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
 
 out:
-       fib6_info_release(from);
+       rcu_read_unlock();
        neigh_release(neigh);
 }
 
@@ -3662,23 +3657,34 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 
 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
 {
-       int type;
        struct dst_entry *dst = skb_dst(skb);
+       struct net *net = dev_net(dst->dev);
+       struct inet6_dev *idev;
+       int type;
+
+       if (netif_is_l3_master(skb->dev) &&
+           dst->dev == net->loopback_dev)
+               idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
+       else
+               idev = ip6_dst_idev(dst);
+
        switch (ipstats_mib_noroutes) {
        case IPSTATS_MIB_INNOROUTES:
                type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
                if (type == IPV6_ADDR_ANY) {
-                       IP6_INC_STATS(dev_net(dst->dev),
-                                     __in6_dev_get_safely(skb->dev),
-                                     IPSTATS_MIB_INADDRERRORS);
+                       IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
                        break;
                }
                /* FALLTHROUGH */
        case IPSTATS_MIB_OUTNOROUTES:
-               IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
-                             ipstats_mib_noroutes);
+               IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
                break;
        }
+
+       /* Start over by dropping the dst for l3mdev case */
+       if (netif_is_l3_master(skb->dev))
+               skb_dst_drop(skb);
+
        icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
        kfree_skb(skb);
        return 0;
@@ -5011,16 +5017,20 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 
        rcu_read_lock();
        from = rcu_dereference(rt->from);
-
-       if (fibmatch)
-               err = rt6_fill_node(net, skb, from, NULL, NULL, NULL, iif,
-                                   RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
-                                   nlh->nlmsg_seq, 0);
-       else
-               err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
-                                   &fl6.saddr, iif, RTM_NEWROUTE,
-                                   NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
-                                   0);
+       if (from) {
+               if (fibmatch)
+                       err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
+                                           iif, RTM_NEWROUTE,
+                                           NETLINK_CB(in_skb).portid,
+                                           nlh->nlmsg_seq, 0);
+               else
+                       err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
+                                           &fl6.saddr, iif, RTM_NEWROUTE,
+                                           NETLINK_CB(in_skb).portid,
+                                           nlh->nlmsg_seq, 0);
+       } else {
+               err = -ENETUNREACH;
+       }
        rcu_read_unlock();
 
        if (err < 0) {
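
Several hunks in this file enforce one discipline: `rt->from` can be cleared concurrently (see the xchg() conversion above), so a reader must fetch it under rcu_read_lock(), check for NULL, and keep the read-side section open for as long as the pointer is used, instead of taking a reference and dropping the lock early as rt6_do_redirect() used to. In outline:

        struct fib6_info *from;

        rcu_read_lock();
        from = rcu_dereference(rt->from);
        if (!from) {                    /* already detached from its origin */
                rcu_read_unlock();
                return -ENETUNREACH;    /* or whichever error fits the caller */
        }
        use_fib6_info(from);            /* hypothetical; valid only in this section */
        rcu_read_unlock();
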
index 07e21a82ce4cc2e41af8e38961f9917d357fd20b..b2109b74857d053f52b06c42698cc393d5838609 100644 (file)
@@ -669,6 +669,10 @@ static int ipip6_rcv(struct sk_buff *skb)
                    !net_eq(tunnel->net, dev_net(tunnel->dev))))
                        goto out;
 
+               /* skb can be uncloned in iptunnel_pull_header, so
+                * old iph is no longer valid
+                */
+               iph = (const struct iphdr *)skb_mac_header(skb);
                err = IP_ECN_decapsulate(iph, skb);
                if (unlikely(err)) {
                        if (log_ecn_error)
index 57ef69a1088908fc624ecfca99a728fa296ae0bf..44d431849d391d6903d263ae547fc9bed1e67aa7 100644 (file)
@@ -1110,11 +1110,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
                newnp->ipv6_fl_list = NULL;
                newnp->pktoptions  = NULL;
                newnp->opt         = NULL;
-               newnp->mcast_oif   = tcp_v6_iif(skb);
-               newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
-               newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
+               newnp->mcast_oif   = inet_iif(skb);
+               newnp->mcast_hops  = ip_hdr(skb)->ttl;
+               newnp->rcv_flowinfo = 0;
                if (np->repflow)
-                       newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
+                       newnp->flow_label = 0;
 
                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
index b444483cdb2b42ef7acdbd7d23a0c046f55077c2..622eeaf5732b39b97752eefb864133e46b27a15d 100644 (file)
@@ -1047,6 +1047,8 @@ static void udp_v6_flush_pending_frames(struct sock *sk)
 static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
                             int addr_len)
 {
+       if (addr_len < offsetofend(struct sockaddr, sa_family))
+               return -EINVAL;
        /* The following checks are replicated from __ip6_datagram_connect()
         * and intended to prevent BPF program called below from accessing
         * bytes that are out of the bound specified by user in addr_len.
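
offsetofend() evaluates to the offset one past the end of a member, so the new check rejects any sockaddr too short to even carry `sa_family` before anything reads it. In the kernel the macro is defined essentially as:

#define offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
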
index bc65db782bfb1fa49d5e5f9d2a25c77372905feb..d9e5f6808811ae6e008af4947943bd75fcd8e83f 100644 (file)
@@ -345,7 +345,7 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
        unsigned int i;
 
        xfrm_flush_gc();
-       xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
+       xfrm_state_flush(net, 0, false, true);
 
        for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
                WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
@@ -402,6 +402,10 @@ static void __exit xfrm6_tunnel_fini(void)
        xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
        xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
        unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
+       /* Someone may still hold a reference to an xfrm6_tunnel_spi,
+        * so wait for in-flight RCU callbacks before destroying the cache.
+        */
+       rcu_barrier();
        kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
 }
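
synchronize_rcu() waits only for readers; it does not wait for previously queued call_rcu() callbacks to finish. Because the tunnel SPI entries are freed back to `xfrm6_tunnel_spi_kmem` from an RCU callback, the cache may only be destroyed after rcu_barrier() has flushed every pending callback. The module-exit shape (names hypothetical):

static void __exit my_exit(void)
{
        /* unregister everything so no new objects can be created, then: */
        rcu_barrier();                  /* wait for queued call_rcu() callbacks */
        kmem_cache_destroy(my_cachep);  /* no callback can touch the cache now */
}
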
 
index c5c5ab6c5a1ccdf55eb7891e8c21ea3cdf7d2a28..44fdc641710dbdb64cedb3306ab822573cb97477 100644 (file)
@@ -2054,14 +2054,14 @@ static int __init kcm_init(void)
        if (err)
                goto fail;
 
-       err = sock_register(&kcm_family_ops);
-       if (err)
-               goto sock_register_fail;
-
        err = register_pernet_device(&kcm_net_ops);
        if (err)
                goto net_ops_fail;
 
+       err = sock_register(&kcm_family_ops);
+       if (err)
+               goto sock_register_fail;
+
        err = kcm_proc_init();
        if (err)
                goto proc_init_fail;
@@ -2069,12 +2069,12 @@ static int __init kcm_init(void)
        return 0;
 
 proc_init_fail:
-       unregister_pernet_device(&kcm_net_ops);
-
-net_ops_fail:
        sock_unregister(PF_KCM);
 
 sock_register_fail:
+       unregister_pernet_device(&kcm_net_ops);
+
+net_ops_fail:
        proto_unregister(&kcm_proto);
 
 fail:
@@ -2090,8 +2090,8 @@ fail:
 static void __exit kcm_exit(void)
 {
        kcm_proc_exit();
-       unregister_pernet_device(&kcm_net_ops);
        sock_unregister(PF_KCM);
+       unregister_pernet_device(&kcm_net_ops);
        proto_unregister(&kcm_proto);
        destroy_workqueue(kcm_wq);
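
The reorder makes the error unwinding (and the exit path below it) a strict mirror image of the init sequence; since sock_register() is what makes PF_KCM sockets creatable, it now comes last, so a socket can never be created while the pernet state is still missing. The general shape (names hypothetical):

static int __init my_init(void)
{
        int err;

        err = register_pernet_device(&my_net_ops);      /* backing state first */
        if (err)
                goto fail;

        err = sock_register(&my_family_ops);            /* user entry point last */
        if (err)
                goto unreg_pernet;

        return 0;

unreg_pernet:
        unregister_pernet_device(&my_net_ops);          /* unwind in reverse */
fail:
        return err;
}
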
 
index 5651c29cb5bd0068d025c9500f6f7513556f65e7..4af1e1d60b9f27b16ecfb65e706ef965a7d598aa 100644 (file)
@@ -1951,8 +1951,10 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
 
        if (rq->sadb_x_ipsecrequest_mode == 0)
                return -EINVAL;
+       if (!xfrm_id_proto_valid(rq->sadb_x_ipsecrequest_proto))
+               return -EINVAL;
 
-       t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */
+       t->id.proto = rq->sadb_x_ipsecrequest_proto;
        if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
                return -EINVAL;
        t->mode = mode;
index fed6becc5daf86afa2ad9188bb28e151244bb5a6..52b5a2797c0c6e85e0cd2f8203616b536b86d178 100644 (file)
@@ -169,8 +169,8 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
 
        rcu_read_lock_bh();
        list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
-               if (tunnel->tunnel_id == tunnel_id) {
-                       l2tp_tunnel_inc_refcount(tunnel);
+               if (tunnel->tunnel_id == tunnel_id &&
+                   refcount_inc_not_zero(&tunnel->ref_count)) {
                        rcu_read_unlock_bh();
 
                        return tunnel;
@@ -190,8 +190,8 @@ struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
 
        rcu_read_lock_bh();
        list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
-               if (++count > nth) {
-                       l2tp_tunnel_inc_refcount(tunnel);
+               if (++count > nth &&
+                   refcount_inc_not_zero(&tunnel->ref_count)) {
                        rcu_read_unlock_bh();
                        return tunnel;
                }
@@ -909,7 +909,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 {
        struct l2tp_tunnel *tunnel;
 
-       tunnel = l2tp_tunnel(sk);
+       tunnel = rcu_dereference_sk_user_data(sk);
        if (tunnel == NULL)
                goto pass_up;
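
A plain reference-count increment on an object found under rcu_read_lock() can resurrect it after the count has already hit zero and teardown has begun; refcount_inc_not_zero() fails in that window, so the lookup simply skips dying tunnels. Fetching `sk_user_data` with rcu_dereference_sk_user_data() closes the matching race on the receive path. The lookup pattern:

        rcu_read_lock();
        list_for_each_entry_rcu(tunnel, &tunnel_list, list) {
                if (tunnel->tunnel_id == id &&
                    refcount_inc_not_zero(&tunnel->ref_count)) {
                        rcu_read_unlock();
                        return tunnel;  /* caller now owns a reference */
                }
        }
        rcu_read_unlock();
        return NULL;                    /* absent, or already being torn down */
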
 
index b99e73a7e7e0f2b4959b279e3aecbadf29667d55..2017b7d780f5af73c1ac7461113842776d1b00fc 100644 (file)
@@ -320,14 +320,13 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
        struct llc_sap *sap;
        int rc = -EINVAL;
 
-       dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
-
        lock_sock(sk);
        if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
                goto out;
        rc = -EAFNOSUPPORT;
        if (unlikely(addr->sllc_family != AF_LLC))
                goto out;
+       dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
        rc = -ENODEV;
        rcu_read_lock();
        if (sk->sk_bound_dev_if) {
index cff0fb3578c9a41519984d266bfad2154d039c5d..deb3faf08337288b2c746be8a2863b38f6d168c7 100644 (file)
@@ -841,7 +841,7 @@ void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
 
        dir = sdata->vif.debugfs_dir;
 
-       if (!dir)
+       if (IS_ERR_OR_NULL(dir))
                return;
 
        sprintf(buf, "netdev:%s", sdata->name);
index 28d022a3eee305bc9d04531eb6b70d3b57412d93..ae4f0be3b393ba727b95060bb7148ec0cd961440 100644 (file)
@@ -1195,6 +1195,9 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
 
+       if (local->in_reconfig)
+               return;
+
        if (!check_sdata_in_driver(sdata))
                return;
 
index e03c46ac8e4d82a7b16f5d70b5ea4a66db6d82db..c62101857b9b919d3dfdc0e839b122a2268e486e 100644 (file)
@@ -112,8 +112,9 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
                             IEEE80211_HT_CAP_TX_STBC);
 
        /* Allow user to configure RX STBC bits */
-       if (ht_capa_mask->cap_info & IEEE80211_HT_CAP_RX_STBC)
-               ht_cap->cap |= ht_capa->cap_info & IEEE80211_HT_CAP_RX_STBC;
+       if (ht_capa_mask->cap_info & cpu_to_le16(IEEE80211_HT_CAP_RX_STBC))
+               ht_cap->cap |= le16_to_cpu(ht_capa->cap_info) &
+                                       IEEE80211_HT_CAP_RX_STBC;
 
        /* Allow user to decrease AMPDU factor */
        if (ht_capa_mask->ampdu_params_info &
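
`cap_info` is a little-endian wire-format field (__le16), so testing it directly against the host-order IEEE80211_HT_CAP_RX_STBC constant only happened to work on little-endian machines. Converting the constant instead compares like with like on every architecture, and sparse's __le16 annotation flags the old form. Condensed:

        /* both sides of & are __le16 now; convert the constant, not the field */
        if (ht_capa_mask->cap_info & cpu_to_le16(IEEE80211_HT_CAP_RX_STBC))
                ht_cap->cap |= le16_to_cpu(ht_capa->cap_info) &
                                        IEEE80211_HT_CAP_RX_STBC;
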
index 4a6ff1482a9ffe4bb775317cfcfff83da982faad..02d2e6f11e936fb54814b909849116de173a91c9 100644 (file)
@@ -1908,6 +1908,9 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
        list_del_rcu(&sdata->list);
        mutex_unlock(&sdata->local->iflist_mtx);
 
+       if (sdata->vif.txq)
+               ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq));
+
        synchronize_rcu();
 
        if (sdata->dev) {
index 4700718e010f5a886001e9a0a0326a628edf0739..37e372896230a08c6a9214f88ce54e7ad823d352 100644 (file)
@@ -167,8 +167,10 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
                 * The driver doesn't know anything about VLAN interfaces.
                 * Hence, don't send GTKs for VLAN interfaces to the driver.
                 */
-               if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE))
+               if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+                       ret = 1;
                        goto out_unsupported;
+               }
        }
 
        ret = drv_set_key(key->local, SET_KEY, sdata,
@@ -213,11 +215,8 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
                /* all of these we can do in software - if driver can */
                if (ret == 1)
                        return 0;
-               if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL)) {
-                       if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-                               return 0;
+               if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL))
                        return -EINVAL;
-               }
                return 0;
        default:
                return -EINVAL;
index 95eb5064fa9166220bf67af98dedf83726ffcdc8..b76a2aefa9ec05e5162ab565a108b5b98848116f 100644 (file)
@@ -23,7 +23,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);
 static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
 {
        /* Use last four bytes of hw addr as hash index */
-       return jhash_1word(*(u32 *)(addr+2), seed);
+       return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
 }
 
 static const struct rhashtable_params mesh_rht_params = {
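
A hardware address is only guaranteed 2-byte alignment here, so the old `*(u32 *)(addr + 2)` performed an unaligned 32-bit load, which faults on strict-alignment architectures. `__get_unaligned_cpu32()` does the load safely, byte-wise where the CPU requires it; it behaves roughly like:

static inline u32 load_unaligned_u32(const void *p)
{
        u32 v;

        memcpy(&v, p, sizeof(v));       /* compiler emits a safe access */
        return v;
}
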
index 7f8d93401ce070f9e2e61ce6a84e5ab8768b5811..bf0b187f994e9c56e191d2045f405cb6e6bac336 100644 (file)
@@ -1568,7 +1568,15 @@ static void sta_ps_start(struct sta_info *sta)
                return;
 
        for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
-               if (txq_has_queue(sta->sta.txq[tid]))
+               struct ieee80211_txq *txq = sta->sta.txq[tid];
+               struct txq_info *txqi = to_txq_info(txq);
+
+               spin_lock(&local->active_txq_lock[txq->ac]);
+               if (!list_empty(&txqi->schedule_order))
+                       list_del_init(&txqi->schedule_order);
+               spin_unlock(&local->active_txq_lock[txq->ac]);
+
+               if (txq_has_queue(txq))
                        set_bit(tid, &sta->txq_buffered_tids);
                else
                        clear_bit(tid, &sta->txq_buffered_tids);
index 366b9e6f043e2df89eccb4d63a9fb3ab1d7db023..40141df09f255fac46043f67656e98e16adda5b9 100644 (file)
@@ -1,4 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions of this file
+ * Copyright (C) 2019 Intel Corporation
+ */
+
 #ifdef CONFIG_MAC80211_MESSAGE_TRACING
 
 #if !defined(__MAC80211_MSG_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
@@ -11,7 +16,7 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM mac80211_msg
 
-#define MAX_MSG_LEN    100
+#define MAX_MSG_LEN    120
 
 DECLARE_EVENT_CLASS(mac80211_msg_event,
        TP_PROTO(struct va_format *vaf),
index 8a49a74c0a374815ca2f374510216b334eb00013..2e816dd67be72d161bf1959554d293f2f6725673 100644 (file)
@@ -3221,6 +3221,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        u8 max_subframes = sta->sta.max_amsdu_subframes;
        int max_frags = local->hw.max_tx_fragments;
        int max_amsdu_len = sta->sta.max_amsdu_len;
+       int orig_truesize;
        __be16 len;
        void *data;
        bool ret = false;
@@ -3261,6 +3262,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        if (!head || skb_is_gso(head))
                goto out;
 
+       orig_truesize = head->truesize;
        orig_len = head->len;
 
        if (skb->len + head->len > max_amsdu_len)
@@ -3318,6 +3320,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        *frag_tail = skb;
 
 out_recalc:
+       fq->memory_usage += head->truesize - orig_truesize;
        if (head->len != orig_len) {
                flow->backlog += head->len - orig_len;
                tin->backlog_bytes += head->len - orig_len;
@@ -3646,16 +3649,17 @@ EXPORT_SYMBOL(ieee80211_tx_dequeue);
 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
 {
        struct ieee80211_local *local = hw_to_local(hw);
+       struct ieee80211_txq *ret = NULL;
        struct txq_info *txqi = NULL;
 
-       lockdep_assert_held(&local->active_txq_lock[ac]);
+       spin_lock_bh(&local->active_txq_lock[ac]);
 
  begin:
        txqi = list_first_entry_or_null(&local->active_txqs[ac],
                                        struct txq_info,
                                        schedule_order);
        if (!txqi)
-               return NULL;
+               goto out;
 
        if (txqi->txq.sta) {
                struct sta_info *sta = container_of(txqi->txq.sta,
@@ -3672,24 +3676,30 @@ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
 
 
        if (txqi->schedule_round == local->schedule_round[ac])
-               return NULL;
+               goto out;
 
        list_del_init(&txqi->schedule_order);
        txqi->schedule_round = local->schedule_round[ac];
-       return &txqi->txq;
+       ret = &txqi->txq;
+
+out:
+       spin_unlock_bh(&local->active_txq_lock[ac]);
+       return ret;
 }
 EXPORT_SYMBOL(ieee80211_next_txq);
 
-void ieee80211_return_txq(struct ieee80211_hw *hw,
-                         struct ieee80211_txq *txq)
+void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
+                             struct ieee80211_txq *txq,
+                             bool force)
 {
        struct ieee80211_local *local = hw_to_local(hw);
        struct txq_info *txqi = to_txq_info(txq);
 
-       lockdep_assert_held(&local->active_txq_lock[txq->ac]);
+       spin_lock_bh(&local->active_txq_lock[txq->ac]);
 
        if (list_empty(&txqi->schedule_order) &&
-           (!skb_queue_empty(&txqi->frags) || txqi->tin.backlog_packets)) {
+           (force || !skb_queue_empty(&txqi->frags) ||
+            txqi->tin.backlog_packets)) {
                /* If airtime accounting is active, always enqueue STAs at the
                 * head of the list to ensure that they only get moved to the
                 * back by the airtime DRR scheduler once they have a negative
@@ -3706,20 +3716,10 @@ void ieee80211_return_txq(struct ieee80211_hw *hw,
                        list_add_tail(&txqi->schedule_order,
                                      &local->active_txqs[txq->ac]);
        }
-}
-EXPORT_SYMBOL(ieee80211_return_txq);
 
-void ieee80211_schedule_txq(struct ieee80211_hw *hw,
-                           struct ieee80211_txq *txq)
-       __acquires(txq_lock) __releases(txq_lock)
-{
-       struct ieee80211_local *local = hw_to_local(hw);
-
-       spin_lock_bh(&local->active_txq_lock[txq->ac]);
-       ieee80211_return_txq(hw, txq);
        spin_unlock_bh(&local->active_txq_lock[txq->ac]);
 }
-EXPORT_SYMBOL(ieee80211_schedule_txq);
+EXPORT_SYMBOL(__ieee80211_schedule_txq);
 
 bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
                                struct ieee80211_txq *txq)
@@ -3729,7 +3729,7 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
        struct sta_info *sta;
        u8 ac = txq->ac;
 
-       lockdep_assert_held(&local->active_txq_lock[ac]);
+       spin_lock_bh(&local->active_txq_lock[ac]);
 
        if (!txqi->txq.sta)
                goto out;
@@ -3759,34 +3759,27 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
 
        sta->airtime[ac].deficit += sta->airtime_weight;
        list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
+       spin_unlock_bh(&local->active_txq_lock[ac]);
 
        return false;
 out:
        if (!list_empty(&txqi->schedule_order))
                list_del_init(&txqi->schedule_order);
+       spin_unlock_bh(&local->active_txq_lock[ac]);
 
        return true;
 }
 EXPORT_SYMBOL(ieee80211_txq_may_transmit);
 
 void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
-       __acquires(txq_lock)
 {
        struct ieee80211_local *local = hw_to_local(hw);
 
        spin_lock_bh(&local->active_txq_lock[ac]);
        local->schedule_round[ac]++;
-}
-EXPORT_SYMBOL(ieee80211_txq_schedule_start);
-
-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
-       __releases(txq_lock)
-{
-       struct ieee80211_local *local = hw_to_local(hw);
-
        spin_unlock_bh(&local->active_txq_lock[ac]);
 }
-EXPORT_SYMBOL(ieee80211_txq_schedule_end);
+EXPORT_SYMBOL(ieee80211_txq_schedule_start);
 
 void __ieee80211_subif_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev,
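
This rework moves acquisition of active_txq_lock inside ieee80211_next_txq(), ieee80211_txq_may_transmit() and the new __ieee80211_schedule_txq(), so drivers no longer hold a mac80211 spinlock across their whole scheduling loop; ieee80211_txq_schedule_end() goes away, and ieee80211_return_txq()/ieee80211_schedule_txq() presumably become header-side wrappers around __ieee80211_schedule_txq() with force=false/true (the header half of the change is not shown here). A driver loop then looks roughly like:

        struct ieee80211_txq *txq;

        ieee80211_txq_schedule_start(hw, ac);   /* just bumps the round counter */
        while ((txq = ieee80211_next_txq(hw, ac)) != NULL) {
                drv_tx_one_txq(txq);            /* hypothetical driver TX work */
                ieee80211_return_txq(hw, txq);  /* requeue if backlog remains */
        }
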
index dda8930f20e790c77c808674f6e35b133bb5657a..f3a8557494d60e4d1ffe1f89fa32ea00c13eabab 100644 (file)
@@ -140,9 +140,15 @@ static int mpls_xmit(struct sk_buff *skb)
        if (rt)
                err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gateway,
                                 skb);
-       else if (rt6)
-               err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway,
-                                skb);
+       else if (rt6) {
+               if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) {
+                       /* 6PE (RFC 4798) */
+                       err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt6->rt6i_gateway.s6_addr32[3],
+                                        skb);
+               } else
+                       err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway,
+                                        skb);
+       }
        if (err)
                net_dbg_ratelimited("%s: packet transmission failed: %d\n",
                                    __func__, err);
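
With 6PE (RFC 4798), the IPv6 next hop learned across the MPLS core is an IPv4-mapped address (::ffff:a.b.c.d), so neighbour resolution has to go through ARP on the embedded IPv4 address rather than IPv6 ND. The mapped form keeps the IPv4 address in the last 32-bit word, hence the `s6_addr32[3]` extraction:

        if (ipv6_addr_v4mapped(&gw6))   /* ::ffff:a.b.c.d ? */
                err = neigh_xmit(NEIGH_ARP_TABLE, dev,
                                 &gw6.s6_addr32[3], skb);       /* IPv4 part */
        else
                err = neigh_xmit(NEIGH_ND_TABLE, dev, &gw6, skb);
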
index 5d782445d2fcf629367777f415e000eb326eab2a..bad17bba8ba786f589212a2575d346850bab6300 100644 (file)
@@ -251,6 +251,10 @@ static int ncsi_pkg_info_all_nl(struct sk_buff *skb,
        }
 
        attr = nla_nest_start(skb, NCSI_ATTR_PACKAGE_LIST);
+       if (!attr) {
+               rc = -EMSGSIZE;
+               goto err;
+       }
        rc = ncsi_write_package_info(skb, ndp, package->id);
        if (rc) {
                nla_nest_cancel(skb, attr);
index dc07fcc7938ec4da2b95e43530fffc0f5aefe82b..802db01e30754cfa66861acc555bf5b02d158df1 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 #include <linux/skbuff.h>
 
 #include <net/ncsi.h>
@@ -667,7 +668,10 @@ static int ncsi_rsp_handler_oem_bcm_gma(struct ncsi_request *nr)
        ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        memcpy(saddr.sa_data, &rsp->data[BCM_MAC_ADDR_OFFSET], ETH_ALEN);
        /* Increase mac address by 1 for BMC's address */
-       saddr.sa_data[ETH_ALEN - 1]++;
+       eth_addr_inc((u8 *)saddr.sa_data);
+       if (!is_valid_ether_addr((const u8 *)saddr.sa_data))
+               return -ENXIO;
+
        ret = ops->ndo_set_mac_address(ndev, &saddr);
        if (ret < 0)
                netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n");
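
The open-coded `sa_data[ETH_ALEN - 1]++` could only wrap the final octet; eth_addr_inc() treats the MAC as a 48-bit big-endian integer and propagates the carry, and the follow-up is_valid_ether_addr() rejects the result if the increment wrapped into an all-zero or multicast address. eth_addr_inc() behaves like:

static void mac_inc(u8 addr[ETH_ALEN])  /* sketch of eth_addr_inc() semantics */
{
        int i;

        for (i = ETH_ALEN - 1; i >= 0; i--)
                if (++addr[i])          /* stop once a byte didn't wrap to 0 */
                        break;
}
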
index d43ffb09939bd3641b213b826a3e0229bcdbb550..6548271209a05c2fce99628c9b23d2cedbf8a087 100644 (file)
@@ -1007,6 +1007,7 @@ config NETFILTER_XT_TARGET_TEE
        depends on NETFILTER_ADVANCED
        depends on IPV6 || IPV6=n
        depends on !NF_CONNTRACK || NF_CONNTRACK
+       depends on IP6_NF_IPTABLES || !IP6_NF_IPTABLES
        select NF_DUP_IPV4
        select NF_DUP_IPV6 if IP6_NF_IPTABLES
        ---help---
index 43bbaa32b1d65cbbec89d439d2ca9bd6bfe77cf3..14457551bcb4edca3047320028be0a331d185e14 100644 (file)
@@ -1678,7 +1678,7 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
        if (!cp) {
                int v;
 
-               if (!sysctl_schedule_icmp(ipvs))
+               if (ipip || !sysctl_schedule_icmp(ipvs))
                        return NF_ACCEPT;
 
                if (!ip_vs_try_to_schedule(ipvs, AF_INET, skb, pd, &v, &cp, &ciph))
index 82bfbeef46afa53fde8d428533999b382713f053..2a714527cde17aee1152f33bc7f9b041cd5eb087 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/slab.h>
 #include <linux/random.h>
 #include <linux/jhash.h>
+#include <linux/siphash.h>
 #include <linux/err.h>
 #include <linux/percpu.h>
 #include <linux/moduleparam.h>
@@ -449,6 +450,40 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
 }
 EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
 
+/* Generate an almost-unique pseudo-id for a given conntrack.
+ *
+ * This intentionally does not re-use any of the seeds used for
+ * hash table placement; the id is assumed to be exposed to userspace.
+ *
+ * The following nf_conn fields do not change throughout the lifetime
+ * of the nf_conn once it has been committed to the main hash table:
+ *
+ * 1. nf_conn address
+ * 2. nf_conn->ext address
+ * 3. nf_conn->master address (normally NULL)
+ * 4. tuple
+ * 5. the associated net namespace
+ */
+u32 nf_ct_get_id(const struct nf_conn *ct)
+{
+       static __read_mostly siphash_key_t ct_id_seed;
+       unsigned long a, b, c, d;
+
+       net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
+
+       a = (unsigned long)ct;
+       b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
+       c = (unsigned long)ct->ext;
+       d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
+                                  &ct_id_seed);
+#ifdef CONFIG_64BIT
+       return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
+#else
+       return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
+#endif
+}
+EXPORT_SYMBOL_GPL(nf_ct_get_id);
+
 static void
 clean_from_lists(struct nf_conn *ct)
 {
@@ -982,12 +1017,9 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 
        /* set conntrack timestamp, if enabled. */
        tstamp = nf_conn_tstamp_find(ct);
-       if (tstamp) {
-               if (skb->tstamp == 0)
-                       __net_timestamp(skb);
+       if (tstamp)
+               tstamp->start = ktime_get_real_ns();
 
-               tstamp->start = ktime_to_ns(skb->tstamp);
-       }
        /* Since the lookup is lockless, hash insertion must be done after
         * starting the timer and setting the CONFIRMED bit. The RCU barriers
         * guarantee that no other CPU can find the conntrack before the above
@@ -1350,6 +1382,7 @@ __nf_conntrack_alloc(struct net *net,
        /* save hash for reusing when confirming */
        *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
        ct->status = 0;
+       ct->timeout = 0;
        write_pnet(&ct->ct_net, net);
        memset(&ct->__nfct_init_offset[0], 0,
               offsetof(struct nf_conn, proto) -
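
Before this change the conntrack id reported to userspace was the object's kernel address, which leaks pointer/layout information and repeats as soon as the slab slot is reused. Hashing fields that are stable for a committed entry through siphash with a boot-time random key keeps the id stable for the entry's lifetime while making it infeasible to map back to a pointer. (The tstamp hunk in the same file stops seeding conntrack start times from skb->tstamp, presumably because that field may now carry a future transmit time rather than a receive stamp.) The once-initialised keyed-hash idiom, condensed to two inputs:

        static __read_mostly siphash_key_t id_seed;
        u32 id;

        net_get_random_once(&id_seed, sizeof(id_seed)); /* key chosen on first use */
        id = (u32)siphash_2u64((unsigned long)ct,
                               (unsigned long)ct->ext, &id_seed);
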
index 66c596d287a5dc44cea26680023e8c12798a5261..d7f61b0547c65c5e85a2080481906d2918a1eddf 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/siphash.h>
 
 #include <linux/netfilter.h>
 #include <net/netlink.h>
@@ -485,7 +486,9 @@ nla_put_failure:
 
 static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
 {
-       if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
+       __be32 id = (__force __be32)nf_ct_get_id(ct);
+
+       if (nla_put_be32(skb, CTA_ID, id))
                goto nla_put_failure;
        return 0;
 
@@ -1286,8 +1289,9 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
        }
 
        if (cda[CTA_ID]) {
-               u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
-               if (id != (u32)(unsigned long)ct) {
+               __be32 id = nla_get_be32(cda[CTA_ID]);
+
+               if (id != (__force __be32)nf_ct_get_id(ct)) {
                        nf_ct_put(ct);
                        return -ENOENT;
                }
@@ -2692,6 +2696,25 @@ nla_put_failure:
 
 static const union nf_inet_addr any_addr;
 
+static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
+{
+       static __read_mostly siphash_key_t exp_id_seed;
+       unsigned long a, b, c, d;
+
+       net_get_random_once(&exp_id_seed, sizeof(exp_id_seed));
+
+       a = (unsigned long)exp;
+       b = (unsigned long)exp->helper;
+       c = (unsigned long)exp->master;
+       d = (unsigned long)siphash(&exp->tuple, sizeof(exp->tuple), &exp_id_seed);
+
+#ifdef CONFIG_64BIT
+       return (__force __be32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &exp_id_seed);
+#else
+       return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed);
+#endif
+}
+
 static int
 ctnetlink_exp_dump_expect(struct sk_buff *skb,
                          const struct nf_conntrack_expect *exp)
@@ -2739,7 +2762,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
        }
 #endif
        if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
-           nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
+           nla_put_be32(skb, CTA_EXPECT_ID, nf_expect_get_id(exp)) ||
            nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
            nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
                goto nla_put_failure;
@@ -3044,7 +3067,8 @@ static int ctnetlink_get_expect(struct net *net, struct sock *ctnl,
 
        if (cda[CTA_EXPECT_ID]) {
                __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
-               if (ntohl(id) != (u32)(unsigned long)exp) {
+
+               if (id != nf_expect_get_id(exp)) {
                        nf_ct_expect_put(exp);
                        return -ENOENT;
                }
index b9403a266a2e20c1651585a5c76beb8a65365609..37bb530d848fa2fa1d9f95e4f8174150260dbd38 100644 (file)
@@ -55,7 +55,7 @@ void nf_l4proto_log_invalid(const struct sk_buff *skb,
        struct va_format vaf;
        va_list args;
 
-       if (net->ct.sysctl_log_invalid != protonum ||
+       if (net->ct.sysctl_log_invalid != protonum &&
            net->ct.sysctl_log_invalid != IPPROTO_RAW)
                return;
 
index 7df477996b1642412faf22d4f08aa518d75f2649..9becac9535873cf7459579a70c3e3d60c055601b 100644 (file)
@@ -103,49 +103,94 @@ int nf_conntrack_icmp_packet(struct nf_conn *ct,
        return NF_ACCEPT;
 }
 
-/* Returns conntrack if it dealt with ICMP, and filled in skb fields */
-static int
-icmp_error_message(struct nf_conn *tmpl, struct sk_buff *skb,
-                  const struct nf_hook_state *state)
+/* Check inner header is related to any of the existing connections */
+int nf_conntrack_inet_error(struct nf_conn *tmpl, struct sk_buff *skb,
+                           unsigned int dataoff,
+                           const struct nf_hook_state *state,
+                           u8 l4proto, union nf_inet_addr *outer_daddr)
 {
        struct nf_conntrack_tuple innertuple, origtuple;
        const struct nf_conntrack_tuple_hash *h;
        const struct nf_conntrack_zone *zone;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_zone tmp;
+       union nf_inet_addr *ct_daddr;
+       enum ip_conntrack_dir dir;
+       struct nf_conn *ct;
 
        WARN_ON(skb_nfct(skb));
        zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
 
        /* Are they talking about one of our connections? */
-       if (!nf_ct_get_tuplepr(skb,
-                              skb_network_offset(skb) + ip_hdrlen(skb)
-                                                      + sizeof(struct icmphdr),
-                              PF_INET, state->net, &origtuple)) {
-               pr_debug("icmp_error_message: failed to get tuple\n");
+       if (!nf_ct_get_tuplepr(skb, dataoff,
+                              state->pf, state->net, &origtuple))
                return -NF_ACCEPT;
-       }
 
        /* Ordinarily, we'd expect the inverted tupleproto, but it's
           been preserved inside the ICMP. */
-       if (!nf_ct_invert_tuple(&innertuple, &origtuple)) {
-               pr_debug("icmp_error_message: no match\n");
+       if (!nf_ct_invert_tuple(&innertuple, &origtuple))
                return -NF_ACCEPT;
-       }
-
-       ctinfo = IP_CT_RELATED;
 
        h = nf_conntrack_find_get(state->net, zone, &innertuple);
-       if (!h) {
-               pr_debug("icmp_error_message: no match\n");
+       if (!h)
+               return -NF_ACCEPT;
+
+       /* Consider: A -> T (=This machine) -> B
+        *   Conntrack entry will look like this:
+        *      Original:  A->B
+        *      Reply:     B->T (SNAT case) OR A
+        *
+        * When this function runs, we have a packet that looks like this:
+        * iphdr|icmphdr|inner_iphdr|l4header (tcp, udp, ..).
+        *
+        * The nf_conntrack_find_get() above does its lookup based on the
+        * inner header, so we expect the destination of the found
+        * connection to match the outer header's destination address.
+        *
+        * In above example, we can consider these two cases:
+        *  1. Error coming in reply direction from B or M (middle box) to
+        *     T (SNAT case) or A.
+        *     Inner saddr will be B, dst will be T or A.
+        *     The found conntrack will be reply tuple (B->T/A).
+        *  2. Error coming in original direction from A or M to B.
+        *     Inner saddr will be A, inner daddr will be B.
+        *     The found conntrack will be original tuple (A->B).
+        *
+        * In both cases, conntrack[dir].dst == inner.dst.
+        *
+        * A bogus packet could look like this:
+        *   Inner: B->T
+        *   Outer: B->X (other machine reachable by T).
+        *
+        * In this case, lookup yields connection A->B and will
+        * set packet from B->X as *RELATED*, even though no connection
+        * from X was ever seen.
+        */
+       ct = nf_ct_tuplehash_to_ctrack(h);
+       dir = NF_CT_DIRECTION(h);
+       ct_daddr = &ct->tuplehash[dir].tuple.dst.u3;
+       if (!nf_inet_addr_cmp(outer_daddr, ct_daddr)) {
+               if (state->pf == AF_INET) {
+                       nf_l4proto_log_invalid(skb, state->net, state->pf,
+                                              l4proto,
+                                              "outer daddr %pI4 != inner %pI4",
+                                              &outer_daddr->ip, &ct_daddr->ip);
+               } else if (state->pf == AF_INET6) {
+                       nf_l4proto_log_invalid(skb, state->net, state->pf,
+                                              l4proto,
+                                              "outer daddr %pI6 != inner %pI6",
+                                              &outer_daddr->ip6, &ct_daddr->ip6);
+               }
+               nf_ct_put(ct);
                return -NF_ACCEPT;
        }
 
-       if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
+       ctinfo = IP_CT_RELATED;
+       if (dir == IP_CT_DIR_REPLY)
                ctinfo += IP_CT_IS_REPLY;
 
        /* Update skb to refer to this connection */
-       nf_ct_set(skb, nf_ct_tuplehash_to_ctrack(h), ctinfo);
+       nf_ct_set(skb, ct, ctinfo);
        return NF_ACCEPT;
 }
 
@@ -162,11 +207,12 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
                              struct sk_buff *skb, unsigned int dataoff,
                              const struct nf_hook_state *state)
 {
+       union nf_inet_addr outer_daddr;
        const struct icmphdr *icmph;
        struct icmphdr _ih;
 
        /* Not enough header? */
-       icmph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_ih), &_ih);
+       icmph = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
        if (icmph == NULL) {
                icmp_error_log(skb, state, "short packet");
                return -NF_ACCEPT;
@@ -199,7 +245,12 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
            icmph->type != ICMP_REDIRECT)
                return NF_ACCEPT;
 
-       return icmp_error_message(tmpl, skb, state);
+       memset(&outer_daddr, 0, sizeof(outer_daddr));
+       outer_daddr.ip = ip_hdr(skb)->daddr;
+
+       dataoff += sizeof(*icmph);
+       return nf_conntrack_inet_error(tmpl, skb, dataoff, state,
+                                      IPPROTO_ICMP, &outer_daddr);
 }
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
index bec4a321165858b828abd0e4449c85afcdf6aeb0..c63ee361285551c2c154ae8fa7dbbc0c56ea7ee2 100644 (file)
@@ -123,51 +123,6 @@ int nf_conntrack_icmpv6_packet(struct nf_conn *ct,
        return NF_ACCEPT;
 }
 
-static int
-icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
-                    struct sk_buff *skb,
-                    unsigned int icmp6off)
-{
-       struct nf_conntrack_tuple intuple, origtuple;
-       const struct nf_conntrack_tuple_hash *h;
-       enum ip_conntrack_info ctinfo;
-       struct nf_conntrack_zone tmp;
-
-       WARN_ON(skb_nfct(skb));
-
-       /* Are they talking about one of our connections? */
-       if (!nf_ct_get_tuplepr(skb,
-                              skb_network_offset(skb)
-                               + sizeof(struct ipv6hdr)
-                               + sizeof(struct icmp6hdr),
-                              PF_INET6, net, &origtuple)) {
-               pr_debug("icmpv6_error: Can't get tuple\n");
-               return -NF_ACCEPT;
-       }
-
-       /* Ordinarily, we'd expect the inverted tupleproto, but it's
-          been preserved inside the ICMP. */
-       if (!nf_ct_invert_tuple(&intuple, &origtuple)) {
-               pr_debug("icmpv6_error: Can't invert tuple\n");
-               return -NF_ACCEPT;
-       }
-
-       ctinfo = IP_CT_RELATED;
-
-       h = nf_conntrack_find_get(net, nf_ct_zone_tmpl(tmpl, skb, &tmp),
-                                 &intuple);
-       if (!h) {
-               pr_debug("icmpv6_error: no match\n");
-               return -NF_ACCEPT;
-       } else {
-               if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
-                       ctinfo += IP_CT_IS_REPLY;
-       }
-
-       /* Update skb to refer to this connection */
-       nf_ct_set(skb, nf_ct_tuplehash_to_ctrack(h), ctinfo);
-       return NF_ACCEPT;
-}
 
 static void icmpv6_error_log(const struct sk_buff *skb,
                             const struct nf_hook_state *state,
@@ -182,6 +137,7 @@ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
                              unsigned int dataoff,
                              const struct nf_hook_state *state)
 {
+       union nf_inet_addr outer_daddr;
        const struct icmp6hdr *icmp6h;
        struct icmp6hdr _ih;
        int type;
@@ -210,7 +166,11 @@ int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
        if (icmp6h->icmp6_type >= 128)
                return NF_ACCEPT;
 
-       return icmpv6_error_message(state->net, tmpl, skb, dataoff);
+       memcpy(&outer_daddr.ip6, &ipv6_hdr(skb)->daddr,
+              sizeof(outer_daddr.ip6));
+       dataoff += sizeof(*icmp6h);
+       return nf_conntrack_inet_error(tmpl, skb, dataoff, state,
+                                      IPPROTO_ICMPV6, &outer_daddr);
 }
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
index f067c6b508572a9ab31bc4b3b6281e1482ffdc6d..39fcc1ed18f3501b3120fc9aeffbe44e27fda933 100644 (file)
@@ -20,9 +20,9 @@
 #include <linux/udp.h>
 #include <linux/tcp.h>
 #include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
 
-#include <net/route.h>
-#include <net/ip6_route.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_expect.h>
@@ -871,38 +871,33 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
        } else if (sip_external_media) {
                struct net_device *dev = skb_dst(skb)->dev;
                struct net *net = dev_net(dev);
-               struct rtable *rt;
-               struct flowi4 fl4 = {};
-#if IS_ENABLED(CONFIG_IPV6)
-               struct flowi6 fl6 = {};
-#endif
+               struct flowi fl;
                struct dst_entry *dst = NULL;
 
+               memset(&fl, 0, sizeof(fl));
+
                switch (nf_ct_l3num(ct)) {
                        case NFPROTO_IPV4:
-                               fl4.daddr = daddr->ip;
-                               rt = ip_route_output_key(net, &fl4);
-                               if (!IS_ERR(rt))
-                                       dst = &rt->dst;
+                               fl.u.ip4.daddr = daddr->ip;
+                               nf_ip_route(net, &dst, &fl, false);
                                break;
 
-#if IS_ENABLED(CONFIG_IPV6)
                        case NFPROTO_IPV6:
-                               fl6.daddr = daddr->in6;
-                               dst = ip6_route_output(net, NULL, &fl6);
-                               if (dst->error) {
-                                       dst_release(dst);
-                                       dst = NULL;
-                               }
+                               fl.u.ip6.daddr = daddr->in6;
+                               nf_ip6_route(net, &dst, &fl, false);
                                break;
-#endif
                }
 
                /* Don't predict any conntracks when media endpoint is reachable
                 * through the same interface as the signalling peer.
                 */
-               if (dst && dst->dev == dev)
-                       return NF_ACCEPT;
+               if (dst) {
+                       bool external_media = (dst->dev == dev);
+
+                       dst_release(dst);
+                       if (external_media)
+                               return NF_ACCEPT;
+               }
        }
 
        /* We need to check whether the registration exists before attempting
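
The dst_entry handed back by a route lookup carries a reference; the old code returned NF_ACCEPT (or fell through) while still holding it, leaking the dst (the hunk also folds the open-coded IPv4/IPv6 lookups into the nf_ip_route()/nf_ip6_route() wrappers). Capturing the single bit that matters and releasing first avoids the leak:

        if (dst) {
                bool external_media = (dst->dev == dev);

                dst_release(dst);       /* drop the lookup's reference first */
                if (external_media)
                        return NF_ACCEPT;
        }
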
index af7dc65377584d26f4b5d98ef55dd06f93d8107d..000952719adfdf49bf35a53dd800c6cecf45c14f 100644 (file)
@@ -415,9 +415,14 @@ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
        case IPPROTO_ICMPV6:
                /* id is same for either direction... */
                keyptr = &tuple->src.u.icmp.id;
-               min = range->min_proto.icmp.id;
-               range_size = ntohs(range->max_proto.icmp.id) -
-                            ntohs(range->min_proto.icmp.id) + 1;
+               if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
+                       min = 0;
+                       range_size = 65536;
+               } else {
+                       min = ntohs(range->min_proto.icmp.id);
+                       range_size = ntohs(range->max_proto.icmp.id) -
+                                    ntohs(range->min_proto.icmp.id) + 1;
+               }
                goto find_free_id;
 #if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE)
        case IPPROTO_GRE:
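[The hunk above makes ICMP/ICMPv6 NAT fall back to the full 16-bit id space when no proto range was specified, instead of reading an uninitialized range. A user-space sketch of that range-selection logic; names are illustrative, not the kernel's:]

#include <stdint.h>
#include <stdio.h>

struct icmp_range {
	int specified;
	uint16_t min_id, max_id;
};

/* With no explicit range, any 16-bit ICMP id is fair game; otherwise
 * ids are drawn from [min_id, max_id] inclusive. */
static uint16_t pick_icmp_id(const struct icmp_range *r, uint32_t hint)
{
	uint32_t min, range_size;

	if (!r->specified) {
		min = 0;
		range_size = 65536;	/* full 16-bit id space */
	} else {
		min = r->min_id;
		range_size = (uint32_t)r->max_id - r->min_id + 1;
	}
	return (uint16_t)(min + hint % range_size);
}

int main(void)
{
	struct icmp_range any = { .specified = 0 };
	struct icmp_range narrow = { .specified = 1, .min_id = 100, .max_id = 109 };

	printf("%u\n", pick_icmp_id(&any, 123456));	/* anywhere in 0..65535 */
	printf("%u\n", pick_icmp_id(&narrow, 123456));	/* within 100..109 */
	return 0;
}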
index 513f931186043f2ded3f1844374decd79768c44b..1606eaa5ae0da368f4a692264456e18dfe27ec8d 100644 (file)
@@ -1545,7 +1545,7 @@ static int nft_chain_parse_hook(struct net *net,
                if (IS_ERR(type))
                        return PTR_ERR(type);
        }
-       if (!(type->hook_mask & (1 << hook->num)))
+       if (hook->num > NF_MAX_HOOKS || !(type->hook_mask & (1 << hook->num)))
                return -EOPNOTSUPP;
 
        if (type->type == NFT_CHAIN_T_NAT &&
@@ -2806,8 +2806,11 @@ err2:
        nf_tables_rule_release(&ctx, rule);
 err1:
        for (i = 0; i < n; i++) {
-               if (info[i].ops != NULL)
+               if (info[i].ops) {
                        module_put(info[i].ops->type->owner);
+                       if (info[i].ops->type->release_ops)
+                               info[i].ops->type->release_ops(info[i].ops);
+               }
        }
        kvfree(info);
        return err;
index b1f9c5303f026a14c799d03b579b9e7b577b6dc8..0b3347570265c4edc1b176f450bf920a0b81e5d4 100644 (file)
@@ -540,7 +540,7 @@ __build_packet_message(struct nfnl_log_net *log,
                        goto nla_put_failure;
        }
 
-       if (skb->tstamp) {
+       if (hooknum <= NF_INET_FORWARD && skb->tstamp) {
                struct nfulnl_msg_packet_timestamp ts;
                struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
                ts.sec = cpu_to_be64(kts.tv_sec);
index 0dcc3592d053ff41f7d8e25119d1a2fd7a90c74a..e057b2961d313cd426f2f2d37ed7e1a40c101174 100644 (file)
@@ -582,7 +582,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
        if (nfqnl_put_bridge(entry, skb) < 0)
                goto nla_put_failure;
 
-       if (entskb->tstamp) {
+       if (entry->state.hook <= NF_INET_FORWARD && entskb->tstamp) {
                struct nfqnl_msg_packet_timestamp ts;
                struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);
 
index 457a9ceb46af2061546da95f46d05c3578c1826a..8dfa798ea68330645c1dea590d05e98539ca2aa7 100644 (file)
@@ -65,21 +65,34 @@ nla_put_failure:
        return -1;
 }
 
-static void nft_objref_destroy(const struct nft_ctx *ctx,
-                              const struct nft_expr *expr)
+static void nft_objref_deactivate(const struct nft_ctx *ctx,
+                                 const struct nft_expr *expr,
+                                 enum nft_trans_phase phase)
 {
        struct nft_object *obj = nft_objref_priv(expr);
 
+       if (phase == NFT_TRANS_COMMIT)
+               return;
+
        obj->use--;
 }
 
+static void nft_objref_activate(const struct nft_ctx *ctx,
+                               const struct nft_expr *expr)
+{
+       struct nft_object *obj = nft_objref_priv(expr);
+
+       obj->use++;
+}
+
 static struct nft_expr_type nft_objref_type;
 static const struct nft_expr_ops nft_objref_ops = {
        .type           = &nft_objref_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_object *)),
        .eval           = nft_objref_eval,
        .init           = nft_objref_init,
-       .destroy        = nft_objref_destroy,
+       .activate       = nft_objref_activate,
+       .deactivate     = nft_objref_deactivate,
        .dump           = nft_objref_dump,
 };
 
index f8092926f704add7a7cc6843b89d9a509a5db046..a340cd8a751b483766e4ed7274ce0fb2c2b193e2 100644 (file)
@@ -233,5 +233,5 @@ module_exit(nft_redir_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
-MODULE_ALIAS_NFT_AF_EXPR(AF_INET4, "redir");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "redir");
 MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "redir");
index fa61208371f8b222ceb43388773f5c19c691d764..321a0036fdf5b95cc8d356b63ad47fc76826498e 100644 (file)
@@ -308,10 +308,6 @@ static void *nft_rbtree_deactivate(const struct net *net,
                else if (d > 0)
                        parent = parent->rb_right;
                else {
-                       if (!nft_set_elem_active(&rbe->ext, genmask)) {
-                               parent = parent->rb_left;
-                               continue;
-                       }
                        if (nft_rbtree_interval_end(rbe) &&
                            !nft_rbtree_interval_end(this)) {
                                parent = parent->rb_left;
@@ -320,6 +316,9 @@ static void *nft_rbtree_deactivate(const struct net *net,
                                   nft_rbtree_interval_end(this)) {
                                parent = parent->rb_right;
                                continue;
+                       } else if (!nft_set_elem_active(&rbe->ext, genmask)) {
+                               parent = parent->rb_left;
+                               continue;
                        }
                        nft_rbtree_flush(net, set, rbe);
                        return rbe;
index c13bcd0ab491304da6ddcaa3f59aefa17ab5eacc..8dbb4d48f2ed5995dedaa8eb4f4b18a0ba91acb2 100644 (file)
@@ -163,19 +163,24 @@ time_mt(const struct sk_buff *skb, struct xt_action_param *par)
        s64 stamp;
 
        /*
-        * We cannot use get_seconds() instead of __net_timestamp() here.
+        * We need real time here, but we can neither use skb->tstamp
+        * nor __net_timestamp().
+        *
+        * skb->tstamp and skb->skb_mstamp_ns overlap, however, they
+        * use different clock types (real vs monotonic).
+        *
         * Suppose you have two rules:
-        *      1. match before 13:00
-        *      2. match after 13:00
+        *      1. match before 13:00
+        *      2. match after 13:00
+        *
         * If you match against processing time (get_seconds) it
         * may happen that the same packet matches both rules if
-        * it arrived at the right moment before 13:00.
+        * it arrived at the right moment before 13:00, so it would be
+        * better to check skb->tstamp and set it via __net_timestamp()
+        * if needed.  This, however, breaks the tx timestamps of outgoing
+        * packets and causes them to get delayed forever by the fq packet
+        * scheduler.
         */
-       if (skb->tstamp == 0)
-               __net_timestamp((struct sk_buff *)skb);
-
-       stamp = ktime_to_ns(skb->tstamp);
-       stamp = div_s64(stamp, NSEC_PER_SEC);
+       stamp = get_seconds();
 
        if (info->flags & XT_TIME_LOCAL_TZ)
                /* Adjust for local timezone */
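[The comment in the hunk above explains why the match reverts to wall-clock processing time rather than stamping skb->tstamp. A simplified sketch of matching a "before/after 13:00" window on seconds-since-epoch; this is illustrative only and omits xt_time's timezone and day/month handling:]

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define SECONDS_PER_DAY 86400

/* Reduce a wall-clock timestamp to the second-of-day and compare it
 * against a daytime window. */
static bool match_daytime(time_t stamp, unsigned int start, unsigned int stop)
{
	unsigned int daysec = stamp % SECONDS_PER_DAY;

	return daysec >= start && daysec <= stop;
}

int main(void)
{
	time_t now = time(NULL);	/* real time, like get_seconds() */

	/* match packets processed between 13:00:00 and 23:59:59 UTC */
	printf("after 13:00 UTC: %d\n",
	       match_daytime(now, 13 * 3600, SECONDS_PER_DAY - 1));
	return 0;
}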
index f28e937320a3b453371143a035e6967482d17cd4..216ab915dd54d4ad7f205aac9f0ab3e3291a2684 100644 (file)
@@ -988,7 +988,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
        int err = 0;
-       unsigned long groups = nladdr->nl_groups;
+       unsigned long groups;
        bool bound;
 
        if (addr_len < sizeof(struct sockaddr_nl))
@@ -996,6 +996,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 
        if (nladdr->nl_family != AF_NETLINK)
                return -EINVAL;
+       groups = nladdr->nl_groups;
 
        /* Only superuser is allowed to listen multicasts */
        if (groups) {
index 25eeb6d2a75a69059f387be103345e844284f743..cb69d35c8e6adc31e07c07f58170c48b6d9e9c3a 100644 (file)
@@ -362,11 +362,11 @@ int genl_register_family(struct genl_family *family)
        } else
                family->attrbuf = NULL;
 
-       family->id = idr_alloc(&genl_fam_idr, family,
-                              start, end + 1, GFP_KERNEL);
+       family->id = idr_alloc_cyclic(&genl_fam_idr, family,
+                                     start, end + 1, GFP_KERNEL);
        if (family->id < 0) {
                err = family->id;
-               goto errout_locked;
+               goto errout_free;
        }
 
        err = genl_validate_assign_mc_groups(family);
@@ -385,6 +385,7 @@ int genl_register_family(struct genl_family *family)
 
 errout_remove:
        idr_remove(&genl_fam_idr, family->id);
+errout_free:
        kfree(family->attrbuf);
 errout_locked:
        genl_unlock_all();
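[Switching to idr_alloc_cyclic() above means a just-freed genetlink family id is not handed straight back out, which avoids confusing userspace listeners that still reference the old family. A simplified user-space analog of cyclic allocation with a wrapping cursor; the kernel helper does this over a radix tree and handles ranges and wrap itself:]

#include <stdio.h>

#define MAX_IDS 8

static int used[MAX_IDS];
static int next_id;

/* Scan forward from the cursor, wrapping once, so freed ids are reused
 * as late as possible. */
static int alloc_cyclic(void)
{
	for (int n = 0; n < MAX_IDS; n++) {
		int id = (next_id + n) % MAX_IDS;

		if (!used[id]) {
			used[id] = 1;
			next_id = (id + 1) % MAX_IDS;
			return id;
		}
	}
	return -1;	/* space exhausted */
}

int main(void)
{
	int a = alloc_cyclic();	/* 0 */
	int b = alloc_cyclic();	/* 1 */

	used[a] = 0;		/* free id 0 ... */
	printf("%d %d %d\n", a, b, alloc_cyclic()); /* ... but next is 2 */
	return 0;
}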
index 1d3144d1990352f4eb8942220e03e225e01af19f..71ffd1a6dc7c6063c00f4c82f985fe9fc0d80dc0 100644 (file)
@@ -1392,18 +1392,22 @@ static int __init nr_proto_init(void)
        int i;
        int rc = proto_register(&nr_proto, 0);
 
-       if (rc != 0)
-               goto out;
+       if (rc)
+               return rc;
 
        if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) {
-               printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to large\n");
-               return -1;
+               pr_err("NET/ROM: %s - nr_ndevs parameter too large\n",
+                      __func__);
+               rc = -EINVAL;
+               goto unregister_proto;
        }
 
        dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL);
-       if (dev_nr == NULL) {
-               printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
-               return -1;
+       if (!dev_nr) {
+               pr_err("NET/ROM: %s - unable to allocate device array\n",
+                      __func__);
+               rc = -ENOMEM;
+               goto unregister_proto;
        }
 
        for (i = 0; i < nr_ndevs; i++) {
@@ -1413,13 +1417,13 @@ static int __init nr_proto_init(void)
                sprintf(name, "nr%d", i);
                dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup);
                if (!dev) {
-                       printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n");
+                       rc = -ENOMEM;
                        goto fail;
                }
 
                dev->base_addr = i;
-               if (register_netdev(dev)) {
-                       printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n");
+               rc = register_netdev(dev);
+               if (rc) {
                        free_netdev(dev);
                        goto fail;
                }
@@ -1427,36 +1431,64 @@ static int __init nr_proto_init(void)
                dev_nr[i] = dev;
        }
 
-       if (sock_register(&nr_family_ops)) {
-               printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n");
+       rc = sock_register(&nr_family_ops);
+       if (rc)
                goto fail;
-       }
 
-       register_netdevice_notifier(&nr_dev_notifier);
+       rc = register_netdevice_notifier(&nr_dev_notifier);
+       if (rc)
+               goto out_sock;
 
        ax25_register_pid(&nr_pid);
        ax25_linkfail_register(&nr_linkfail_notifier);
 
 #ifdef CONFIG_SYSCTL
-       nr_register_sysctl();
+       rc = nr_register_sysctl();
+       if (rc)
+               goto out_sysctl;
 #endif
 
        nr_loopback_init();
 
-       proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops);
-       proc_create_seq("nr_neigh", 0444, init_net.proc_net, &nr_neigh_seqops);
-       proc_create_seq("nr_nodes", 0444, init_net.proc_net, &nr_node_seqops);
-out:
-       return rc;
+       rc = -ENOMEM;
+       if (!proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops))
+               goto proc_remove1;
+       if (!proc_create_seq("nr_neigh", 0444, init_net.proc_net,
+                            &nr_neigh_seqops))
+               goto proc_remove2;
+       if (!proc_create_seq("nr_nodes", 0444, init_net.proc_net,
+                            &nr_node_seqops))
+               goto proc_remove3;
+
+       return 0;
+
+proc_remove3:
+       remove_proc_entry("nr_neigh", init_net.proc_net);
+proc_remove2:
+       remove_proc_entry("nr", init_net.proc_net);
+proc_remove1:
+
+       nr_loopback_clear();
+       nr_rt_free();
+
+#ifdef CONFIG_SYSCTL
+       nr_unregister_sysctl();
+out_sysctl:
+#endif
+       ax25_linkfail_release(&nr_linkfail_notifier);
+       ax25_protocol_release(AX25_P_NETROM);
+       unregister_netdevice_notifier(&nr_dev_notifier);
+out_sock:
+       sock_unregister(PF_NETROM);
 fail:
        while (--i >= 0) {
                unregister_netdev(dev_nr[i]);
                free_netdev(dev_nr[i]);
        }
        kfree(dev_nr);
+unregister_proto:
        proto_unregister(&nr_proto);
-       rc = -1;
-       goto out;
+       return rc;
 }
 
 module_init(nr_proto_init);
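[The rewritten nr_proto_init() above unwinds its registrations in reverse order through a ladder of goto labels, so each failure point undoes exactly what succeeded before it. A compact, hypothetical sketch of that pattern:]

#include <stdio.h>

static int step(const char *name, int fail)
{
	if (fail) {
		printf("%s failed\n", name);
		return -1;
	}
	printf("%s ok\n", name);
	return 0;
}

/* Each step that can fail jumps to a label that undoes everything
 * already done, in reverse order of setup. */
static int init(int fail_at)
{
	int rc;

	rc = step("register proto", fail_at == 1);
	if (rc)
		return rc;
	rc = step("register socket family", fail_at == 2);
	if (rc)
		goto unregister_proto;
	rc = step("register notifier", fail_at == 3);
	if (rc)
		goto unregister_family;
	return 0;

unregister_family:
	printf("undo socket family\n");
unregister_proto:
	printf("undo proto\n");
	return rc;
}

int main(void)
{
	return init(3) ? 1 : 0;
}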
index 215ad22a96476ebb9d30919e99d67bda8e1ce88f..93d13f01998133a2b6c6b3256bb19679f14cea65 100644 (file)
@@ -70,7 +70,7 @@ static void nr_loopback_timer(struct timer_list *unused)
        }
 }
 
-void __exit nr_loopback_clear(void)
+void nr_loopback_clear(void)
 {
        del_timer_sync(&loopback_timer);
        skb_queue_purge(&loopback_queue);
index 6485f593e2f09bc3f215e2ad2c638154de738487..b76aa668a94bce6c6d1280d5cbf307d6ce94e013 100644 (file)
@@ -953,7 +953,7 @@ const struct seq_operations nr_neigh_seqops = {
 /*
  *     Free all memory associated with the nodes and routes lists.
  */
-void __exit nr_rt_free(void)
+void nr_rt_free(void)
 {
        struct nr_neigh *s = NULL;
        struct nr_node  *t = NULL;
index ba1c368b3f186e140149a75e8d98dee24587a020..771011b84270e87854a8c47db1c0253640449fcc 100644 (file)
@@ -146,9 +146,12 @@ static struct ctl_table nr_table[] = {
        { }
 };
 
-void __init nr_register_sysctl(void)
+int __init nr_register_sysctl(void)
 {
        nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table);
+       if (!nr_table_header)
+               return -ENOMEM;
+       return 0;
 }
 
 void nr_unregister_sysctl(void)
index ae296273ce3db96cdaeafba66a7ff460d8a59794..17dcd0b5eb3287989d5a72a19194bc5674f3cb1e 100644 (file)
@@ -726,6 +726,10 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
        llcp_sock->service_name = kmemdup(addr->service_name,
                                          llcp_sock->service_name_len,
                                          GFP_KERNEL);
+       if (!llcp_sock->service_name) {
+               ret = -ENOMEM;
+               goto sock_llcp_release;
+       }
 
        nfc_llcp_sock_link(&local->connecting_sockets, sk);
 
@@ -745,10 +749,11 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
        return ret;
 
 sock_unlink:
-       nfc_llcp_put_ssap(local, llcp_sock->ssap);
-
        nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
 
+sock_llcp_release:
+       nfc_llcp_put_ssap(local, llcp_sock->ssap);
+
 put_dev:
        nfc_put_device(dev);
 
index ddfc52ac1f9b4391cb8b6e0f107658b1ee011565..c0d323b58e732318cc352be35bf940693b9bd028 100644 (file)
@@ -312,6 +312,10 @@ static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe,
                create_info = (struct nci_hci_create_pipe_resp *)skb->data;
                dest_gate = create_info->dest_gate;
                new_pipe = create_info->pipe;
+               if (new_pipe >= NCI_HCI_MAX_PIPES) {
+                       status = NCI_HCI_ANY_E_NOK;
+                       goto exit;
+               }
 
                /* Save the new created pipe and bind with local gate,
                 * the description for skb->data[3] is destination gate id
@@ -336,6 +340,10 @@ static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe,
                        goto exit;
                }
                delete_info = (struct nci_hci_delete_pipe_noti *)skb->data;
+               if (delete_info->pipe >= NCI_HCI_MAX_PIPES) {
+                       status = NCI_HCI_ANY_E_NOK;
+                       goto exit;
+               }
 
                ndev->hci_dev->pipes[delete_info->pipe].gate =
                                                NCI_HCI_INVALID_GATE;
index 6679e96ab1dcdf8761845b863c39e1b6aac20d2e..9dd158ab51b310e28354237ff3bcd101a8b829f5 100644 (file)
@@ -448,6 +448,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
 
        upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
                             0, upcall_info->cmd);
+       if (!upcall) {
+               err = -EINVAL;
+               goto out;
+       }
        upcall->dp_ifindex = dp_ifindex;
 
        err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
@@ -460,6 +464,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
 
        if (upcall_info->egress_tun_info) {
                nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
+               if (!nla) {
+                       err = -EMSGSIZE;
+                       goto out;
+               }
                err = ovs_nla_put_tunnel_info(user_skb,
                                              upcall_info->egress_tun_info);
                BUG_ON(err);
@@ -468,6 +476,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
 
        if (upcall_info->actions_len) {
                nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
+               if (!nla) {
+                       err = -EMSGSIZE;
+                       goto out;
+               }
                err = ovs_nla_put_actions(upcall_info->actions,
                                          upcall_info->actions_len,
                                          user_skb);
index 691da853bef5cb801d963cae4e9bf7b23a3dddd6..4bdf5e3ac2087a67e715ebd720159205697668a8 100644 (file)
@@ -2306,14 +2306,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
 
        struct sw_flow_actions *acts;
        int new_acts_size;
-       int req_size = NLA_ALIGN(attr_len);
+       size_t req_size = NLA_ALIGN(attr_len);
        int next_offset = offsetof(struct sw_flow_actions, actions) +
                                        (*sfa)->actions_len;
 
        if (req_size <= (ksize(*sfa) - next_offset))
                goto out;
 
-       new_acts_size = ksize(*sfa) * 2;
+       new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
 
        if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
                if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
index 8376bc1c1508170aa333f8feaa8a154908e0eb3c..9b81813dd16af490859ab32ae59cf919c9e15ed6 100644 (file)
@@ -1852,7 +1852,8 @@ oom:
 
 static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
 {
-       if (!skb->protocol && sock->type == SOCK_RAW) {
+       if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
+           sock->type == SOCK_RAW) {
                skb_reset_mac_header(skb);
                skb->protocol = dev_parse_header_protocol(skb);
        }
@@ -2601,8 +2602,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
        void *ph;
        DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
        bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
+       unsigned char *addr = NULL;
        int tp_len, size_max;
-       unsigned char *addr;
        void *data;
        int len_sum = 0;
        int status = TP_STATUS_AVAILABLE;
@@ -2613,7 +2614,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
        if (likely(saddr == NULL)) {
                dev     = packet_cached_dev_get(po);
                proto   = po->num;
-               addr    = NULL;
        } else {
                err = -EINVAL;
                if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2623,10 +2623,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
                                                sll_addr)))
                        goto out;
                proto   = saddr->sll_protocol;
-               addr    = saddr->sll_halen ? saddr->sll_addr : NULL;
                dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
-               if (addr && dev && saddr->sll_halen < dev->addr_len)
-                       goto out_put;
+               if (po->sk.sk_socket->type == SOCK_DGRAM) {
+                       if (dev && msg->msg_namelen < dev->addr_len +
+                                  offsetof(struct sockaddr_ll, sll_addr))
+                               goto out_put;
+                       addr = saddr->sll_addr;
+               }
        }
 
        err = -ENXIO;
@@ -2798,7 +2801,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        struct sk_buff *skb;
        struct net_device *dev;
        __be16 proto;
-       unsigned char *addr;
+       unsigned char *addr = NULL;
        int err, reserve = 0;
        struct sockcm_cookie sockc;
        struct virtio_net_hdr vnet_hdr = { 0 };
@@ -2815,7 +2818,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        if (likely(saddr == NULL)) {
                dev     = packet_cached_dev_get(po);
                proto   = po->num;
-               addr    = NULL;
        } else {
                err = -EINVAL;
                if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2823,10 +2825,13 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
                if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
                        goto out;
                proto   = saddr->sll_protocol;
-               addr    = saddr->sll_halen ? saddr->sll_addr : NULL;
                dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
-               if (addr && dev && saddr->sll_halen < dev->addr_len)
-                       goto out_unlock;
+               if (sock->type == SOCK_DGRAM) {
+                       if (dev && msg->msg_namelen < dev->addr_len +
+                                  offsetof(struct sockaddr_ll, sll_addr))
+                               goto out_unlock;
+                       addr = saddr->sll_addr;
+               }
        }
 
        err = -ENXIO;
@@ -3243,7 +3248,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
        }
 
        mutex_lock(&net->packet.sklist_lock);
-       sk_add_node_rcu(sk, &net->packet.sklist);
+       sk_add_node_tail_rcu(sk, &net->packet.sklist);
        mutex_unlock(&net->packet.sklist_lock);
 
        preempt_disable();
@@ -3343,20 +3348,29 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
        sock_recv_ts_and_drops(msg, sk, skb);
 
        if (msg->msg_name) {
+               int copy_len;
+
                /* If the address length field is there to be filled
                 * in, we fill it in now.
                 */
                if (sock->type == SOCK_PACKET) {
                        __sockaddr_check_size(sizeof(struct sockaddr_pkt));
                        msg->msg_namelen = sizeof(struct sockaddr_pkt);
+                       copy_len = msg->msg_namelen;
                } else {
                        struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
 
                        msg->msg_namelen = sll->sll_halen +
                                offsetof(struct sockaddr_ll, sll_addr);
+                       copy_len = msg->msg_namelen;
+                       if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
+                               memset(msg->msg_name +
+                                      offsetof(struct sockaddr_ll, sll_addr),
+                                      0, sizeof(sll->sll_addr));
+                               msg->msg_namelen = sizeof(struct sockaddr_ll);
+                       }
                }
-               memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
-                      msg->msg_namelen);
+               memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
        }
 
        if (pkt_sk(sk)->auxdata) {
@@ -4209,7 +4223,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
        struct pgv *pg_vec;
        int i;
 
-       pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
+       pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
        if (unlikely(!pg_vec))
                goto out;
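[The packet_recvmsg() hunk above copies only the link-layer address bytes that are actually present and zero-fills the rest of the fixed-size sockaddr_ll, so no stale kernel memory reaches userspace. A minimal sketch of that copy-and-pad idiom; the struct here is illustrative:]

#include <stdio.h>
#include <string.h>

struct addr_out {
	unsigned char addr[8];	/* fixed-size field handed to the caller */
};

/* Copy only the bytes that are really present, zero the remainder so
 * the caller never sees uninitialized memory. */
static void fill_addr(struct addr_out *out,
		      const unsigned char *src, size_t halen)
{
	size_t copy_len = halen < sizeof(out->addr) ? halen : sizeof(out->addr);

	memcpy(out->addr, src, copy_len);
	memset(out->addr + copy_len, 0, sizeof(out->addr) - copy_len);
}

int main(void)
{
	unsigned char mac[4] = { 0xde, 0xad, 0xbe, 0xef };
	struct addr_out out;

	fill_addr(&out, mac, sizeof(mac));
	for (size_t i = 0; i < sizeof(out.addr); i++)
		printf("%02x", out.addr[i]);	/* deadbeef00000000 */
	printf("\n");
	return 0;
}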
 
index d6cc97fbbbb02458d958a8f493e37e6249db4db6..2b969f99ef1311f845baea874a985714cb051c7c 100644 (file)
@@ -543,6 +543,9 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
        struct rds_sock *rs = rds_sk_to_rs(sk);
        int ret = 0;
 
+       if (addr_len < offsetofend(struct sockaddr, sa_family))
+               return -EINVAL;
+
        lock_sock(sk);
 
        switch (uaddr->sa_family) {
index 17c9d9f0c8483b4b0a887e69e7caac246c369423..0f4398e7f2a7add7c20b6fdd333c40af4e719c92 100644 (file)
@@ -173,6 +173,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        /* We allow an RDS socket to be bound to either IPv4 or IPv6
         * address.
         */
+       if (addr_len < offsetofend(struct sockaddr, sa_family))
+               return -EINVAL;
        if (uaddr->sa_family == AF_INET) {
                struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
 
index 31cf37da4510c3b53377ea40d4880638a89775e5..93c0437e6a5fd284b3e6dd1283e31839a305be7b 100644 (file)
@@ -44,6 +44,17 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
        else
                pool = rds_ibdev->mr_1m_pool;
 
+       if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+               queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
+
+       /* Switch pools if one of the pools is reaching its upper limit */
+       if (atomic_read(&pool->dirty_count) >=  pool->max_items * 9 / 10) {
+               if (pool->pool_type == RDS_IB_MR_8K_POOL)
+                       pool = rds_ibdev->mr_1m_pool;
+               else
+                       pool = rds_ibdev->mr_8k_pool;
+       }
+
        ibmr = rds_ib_try_reuse_ibmr(pool);
        if (ibmr)
                return ibmr;
index 63c8d107adcfbec096b3dbcead8de98ec6327bc1..d664e9ade74dea264c06e0ac03997ebdc0254235 100644 (file)
@@ -454,9 +454,6 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
        struct rds_ib_mr *ibmr = NULL;
        int iter = 0;
 
-       if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
-               queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
-
        while (1) {
                ibmr = rds_ib_reuse_mr(pool);
                if (ibmr)
index 70559854837ee1d46dbb91c8dc9b1d1b5b4bb969..8946c89d739231efb659b3d50ddc1e3b14cc6b60 100644 (file)
@@ -772,7 +772,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
        unsigned long frag_off;
        unsigned long to_copy;
        unsigned long copied;
-       uint64_t uncongested = 0;
+       __le64 uncongested = 0;
        void *addr;
 
        /* catch completely corrupt packets */
@@ -789,7 +789,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
        copied = 0;
 
        while (copied < RDS_CONG_MAP_BYTES) {
-               uint64_t *src, *dst;
+               __le64 *src, *dst;
                unsigned int k;
 
                to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
@@ -824,9 +824,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
        }
 
        /* the congestion map is in little endian order */
-       uncongested = le64_to_cpu(uncongested);
-
-       rds_cong_map_updated(map, uncongested);
+       rds_cong_map_updated(map, le64_to_cpu(uncongested));
 }
 
 static void rds_ib_process_recv(struct rds_connection *conn,
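[The rds_ib_cong_recv() hunk above keeps the congestion-map accumulator in __le64 and converts once at the end, which is sparse-clean and bit-identical to converting every word up front, because byte swapping distributes over bitwise OR. A small demonstration using glibc's endian.h:]

#include <endian.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t words_le[3] = { htole64(0x1), htole64(0x100), htole64(0x10000) };
	uint64_t acc_le = 0, acc_host = 0;

	for (int i = 0; i < 3; i++) {
		acc_le |= words_le[i];		  /* stay in LE, convert once */
		acc_host |= le64toh(words_le[i]); /* convert each word */
	}
	/* Both accumulations yield the same host-order value. */
	printf("%" PRIx64 " == %" PRIx64 "\n", le64toh(acc_le), acc_host);
	return 0;
}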
index fd2694174607405ab96f6f0dea10bc8dcc8caea9..faf726e00e27c75b11721dbc55518ca60bdf00a6 100644 (file)
@@ -608,7 +608,7 @@ static void rds_tcp_kill_sock(struct net *net)
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
 
-               if (net != c_net || !tc->t_sock)
+               if (net != c_net)
                        continue;
                if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
                        list_move_tail(&tc->t_tcp_node, &tmp_list);
index 7af4f99c4a9321bb3eef8d77f7e2dedf981e19f3..094a6621f8e803ae41101899ef02f080d91ac0f3 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 
 static struct sk_buff_head loopback_queue;
+#define ROSE_LOOPBACK_LIMIT 1000
 static struct timer_list loopback_timer;
 
 static void rose_set_loopback_timer(void);
@@ -35,29 +36,27 @@ static int rose_loopback_running(void)
 
 int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh)
 {
-       struct sk_buff *skbn;
+       struct sk_buff *skbn = NULL;
 
-       skbn = skb_clone(skb, GFP_ATOMIC);
+       if (skb_queue_len(&loopback_queue) < ROSE_LOOPBACK_LIMIT)
+               skbn = skb_clone(skb, GFP_ATOMIC);
 
-       kfree_skb(skb);
-
-       if (skbn != NULL) {
+       if (skbn) {
+               consume_skb(skb);
                skb_queue_tail(&loopback_queue, skbn);
 
                if (!rose_loopback_running())
                        rose_set_loopback_timer();
+       } else {
+               kfree_skb(skb);
        }
 
        return 1;
 }
 
-
 static void rose_set_loopback_timer(void)
 {
-       del_timer(&loopback_timer);
-
-       loopback_timer.expires  = jiffies + 10;
-       add_timer(&loopback_timer);
+       mod_timer(&loopback_timer, jiffies + 10);
 }
 
 static void rose_loopback_timer(struct timer_list *unused)
@@ -68,8 +67,12 @@ static void rose_loopback_timer(struct timer_list *unused)
        struct sock *sk;
        unsigned short frametype;
        unsigned int lci_i, lci_o;
+       int count;
 
-       while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
+       for (count = 0; count < ROSE_LOOPBACK_LIMIT; count++) {
+               skb = skb_dequeue(&loopback_queue);
+               if (!skb)
+                       return;
                if (skb->len < ROSE_MIN_LEN) {
                        kfree_skb(skb);
                        continue;
@@ -106,6 +109,8 @@ static void rose_loopback_timer(struct timer_list *unused)
                        kfree_skb(skb);
                }
        }
+       if (!skb_queue_empty(&loopback_queue))
+               mod_timer(&loopback_timer, jiffies + 1);
 }
 
 void __exit rose_loopback_clear(void)
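[The rose loopback hunks above cap the queue at 1000 skbs and drain at most that many per timer tick, re-arming the timer when work remains, so a flood cannot monopolize the handler. A sketch of the bounded-batch drain pattern with a plain counter standing in for the skb queue and timer:]

#include <stdio.h>

#define LIMIT 4

static int queued = 10;

static int dequeue(void)
{
	if (queued <= 0)
		return -1;
	return queued--;
}

/* Process at most LIMIT items per tick; reschedule if anything is left. */
static void timer_tick(void)
{
	for (int count = 0; count < LIMIT; count++) {
		if (dequeue() < 0)
			return;		/* queue empty, no re-arm needed */
	}
	if (queued > 0)
		printf("re-arm timer, %d left\n", queued);
}

int main(void)
{
	while (queued > 0)
		timer_tick();
	return 0;
}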
index 7ca57741b2fbbbc8f5ccf139f5ffbe56b969c458..7849f286bb9331dbfce00e58cbe0c325a36894f5 100644 (file)
@@ -105,16 +105,17 @@ void rose_write_internal(struct sock *sk, int frametype)
        struct sk_buff *skb;
        unsigned char  *dptr;
        unsigned char  lci1, lci2;
-       char buffer[100];
-       int len, faclen = 0;
+       int maxfaclen = 0;
+       int len, faclen;
+       int reserve;
 
-       len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1;
+       reserve = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1;
+       len = ROSE_MIN_LEN;
 
        switch (frametype) {
        case ROSE_CALL_REQUEST:
                len   += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN;
-               faclen = rose_create_facilities(buffer, rose);
-               len   += faclen;
+               maxfaclen = 256;
                break;
        case ROSE_CALL_ACCEPTED:
        case ROSE_CLEAR_REQUEST:
@@ -123,15 +124,16 @@ void rose_write_internal(struct sock *sk, int frametype)
                break;
        }
 
-       if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
+       skb = alloc_skb(reserve + len + maxfaclen, GFP_ATOMIC);
+       if (!skb)
                return;
 
        /*
         *      Space for AX.25 header and PID.
         */
-       skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1);
+       skb_reserve(skb, reserve);
 
-       dptr = skb_put(skb, skb_tailroom(skb));
+       dptr = skb_put(skb, len);
 
        lci1 = (rose->lci >> 8) & 0x0F;
        lci2 = (rose->lci >> 0) & 0xFF;
@@ -146,7 +148,8 @@ void rose_write_internal(struct sock *sk, int frametype)
                dptr   += ROSE_ADDR_LEN;
                memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
                dptr   += ROSE_ADDR_LEN;
-               memcpy(dptr, buffer, faclen);
+               faclen = rose_create_facilities(dptr, rose);
+               skb_put(skb, faclen);
                dptr   += faclen;
                break;
 
index 96f2952bbdfd6e62ffcec87f0a565378abbfe4f5..ae8c5d7f3bf1e29460e5b96b05b7b1b1ecd4ce15 100644 (file)
@@ -135,7 +135,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
        struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr;
        struct rxrpc_local *local;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
-       u16 service_id = srx->srx_service;
+       u16 service_id;
        int ret;
 
        _enter("%p,%p,%d", rx, saddr, len);
@@ -143,6 +143,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
        ret = rxrpc_validate_address(rx, srx, len);
        if (ret < 0)
                goto error;
+       service_id = srx->srx_service;
 
        lock_sock(&rx->sk);
 
@@ -370,18 +371,22 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
  * rxrpc_kernel_check_life - Check to see whether a call is still alive
  * @sock: The socket the call is on
  * @call: The call to check
+ * @_life: Where to store the life value
  *
  * Allow a kernel service to find out whether a call is still alive - ie. we're
- * getting ACKs from the server.  Returns a number representing the life state
- * which can be compared to that returned by a previous call.
+ * getting ACKs from the server.  Passes back in *_life a number representing
+ * the life state, which can be compared to that returned by a previous call,
+ * and returns true if the call is still alive.
  *
  * If the life state stalls, rxrpc_kernel_probe_life() should be called and
  * then 2RTT waited.
  */
-u32 rxrpc_kernel_check_life(const struct socket *sock,
-                           const struct rxrpc_call *call)
+bool rxrpc_kernel_check_life(const struct socket *sock,
+                            const struct rxrpc_call *call,
+                            u32 *_life)
 {
-       return call->acks_latest;
+       *_life = call->acks_latest;
+       return call->state != RXRPC_CALL_COMPLETE;
 }
 EXPORT_SYMBOL(rxrpc_kernel_check_life);
 
index 4b1a534d290a79e3f035ee60766b4f2ebb2e35c2..062ca9dc29b8ab2fa7381c606791d4fd39657962 100644 (file)
@@ -654,6 +654,7 @@ struct rxrpc_call {
        u8                      ackr_reason;    /* reason to ACK */
        u16                     ackr_skew;      /* skew on packet being ACK'd */
        rxrpc_serial_t          ackr_serial;    /* serial of packet being ACK'd */
+       rxrpc_serial_t          ackr_first_seq; /* first sequence number received */
        rxrpc_seq_t             ackr_prev_seq;  /* previous sequence number received */
        rxrpc_seq_t             ackr_consumed;  /* Highest packet shown consumed */
        rxrpc_seq_t             ackr_seen;      /* Highest packet shown seen */
index 8aa2937b069f78a7b296704e773398c722b66749..fe96881a334daff644a1f9d01497771f745e9fc8 100644 (file)
@@ -604,30 +604,30 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
 
        _enter("");
 
-       if (list_empty(&rxnet->calls))
-               return;
+       if (!list_empty(&rxnet->calls)) {
+               write_lock(&rxnet->call_lock);
 
-       write_lock(&rxnet->call_lock);
+               while (!list_empty(&rxnet->calls)) {
+                       call = list_entry(rxnet->calls.next,
+                                         struct rxrpc_call, link);
+                       _debug("Zapping call %p", call);
 
-       while (!list_empty(&rxnet->calls)) {
-               call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
-               _debug("Zapping call %p", call);
+                       rxrpc_see_call(call);
+                       list_del_init(&call->link);
 
-               rxrpc_see_call(call);
-               list_del_init(&call->link);
+                       pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
+                              call, atomic_read(&call->usage),
+                              rxrpc_call_states[call->state],
+                              call->flags, call->events);
 
-               pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
-                      call, atomic_read(&call->usage),
-                      rxrpc_call_states[call->state],
-                      call->flags, call->events);
+                       write_unlock(&rxnet->call_lock);
+                       cond_resched();
+                       write_lock(&rxnet->call_lock);
+               }
 
                write_unlock(&rxnet->call_lock);
-               cond_resched();
-               write_lock(&rxnet->call_lock);
        }
 
-       write_unlock(&rxnet->call_lock);
-
        atomic_dec(&rxnet->nr_calls);
        wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
 }
index b6fca8ebb1173f4de1047e96315c26072666c2e9..8d31fb4c51e17c1934face0f4320dfd219525a66 100644 (file)
@@ -153,7 +153,8 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
  * pass a connection-level abort onto all calls on that connection
  */
 static void rxrpc_abort_calls(struct rxrpc_connection *conn,
-                             enum rxrpc_call_completion compl)
+                             enum rxrpc_call_completion compl,
+                             rxrpc_serial_t serial)
 {
        struct rxrpc_call *call;
        int i;
@@ -173,6 +174,9 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
                                                  call->call_id, 0,
                                                  conn->abort_code,
                                                  conn->error);
+                       else
+                               trace_rxrpc_rx_abort(call, serial,
+                                                    conn->abort_code);
                        if (rxrpc_set_call_completion(call, compl,
                                                      conn->abort_code,
                                                      conn->error))
@@ -213,8 +217,6 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
        conn->state = RXRPC_CONN_LOCALLY_ABORTED;
        spin_unlock_bh(&conn->state_lock);
 
-       rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED);
-
        msg.msg_name    = &conn->params.peer->srx.transport;
        msg.msg_namelen = conn->params.peer->srx.transport_len;
        msg.msg_control = NULL;
@@ -242,6 +244,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
        len = iov[0].iov_len + iov[1].iov_len;
 
        serial = atomic_inc_return(&conn->serial);
+       rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, serial);
        whdr.serial = htonl(serial);
        _proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code);
 
@@ -321,7 +324,7 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
                conn->error = -ECONNABORTED;
                conn->abort_code = abort_code;
                conn->state = RXRPC_CONN_REMOTELY_ABORTED;
-               rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED);
+               rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial);
                return -ECONNABORTED;
 
        case RXRPC_PACKET_TYPE_CHALLENGE:
index 9128aa0e40aac8f51a84f10dc0bd0dd5933c1e23..c2c35cf4e3089038bcc73663f0a0d3ccf24b9743 100644 (file)
@@ -837,7 +837,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
                u8 acks[RXRPC_MAXACKS];
        } buf;
        rxrpc_serial_t acked_serial;
-       rxrpc_seq_t first_soft_ack, hard_ack;
+       rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
        int nr_acks, offset, ioffset;
 
        _enter("");
@@ -851,13 +851,14 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 
        acked_serial = ntohl(buf.ack.serial);
        first_soft_ack = ntohl(buf.ack.firstPacket);
+       prev_pkt = ntohl(buf.ack.previousPacket);
        hard_ack = first_soft_ack - 1;
        nr_acks = buf.ack.nAcks;
        summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
                              buf.ack.reason : RXRPC_ACK__INVALID);
 
        trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial,
-                          first_soft_ack, ntohl(buf.ack.previousPacket),
+                          first_soft_ack, prev_pkt,
                           summary.ack_reason, nr_acks);
 
        if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
@@ -878,8 +879,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
                                  rxrpc_propose_ack_respond_to_ack);
        }
 
-       /* Discard any out-of-order or duplicate ACKs. */
-       if (before_eq(sp->hdr.serial, call->acks_latest))
+       /* Discard any out-of-order or duplicate ACKs (outside lock). */
+       if (before(first_soft_ack, call->ackr_first_seq) ||
+           before(prev_pkt, call->ackr_prev_seq))
                return;
 
        buf.info.rxMTU = 0;
@@ -890,12 +892,16 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 
        spin_lock(&call->input_lock);
 
-       /* Discard any out-of-order or duplicate ACKs. */
-       if (before_eq(sp->hdr.serial, call->acks_latest))
+       /* Discard any out-of-order or duplicate ACKs (inside lock). */
+       if (before(first_soft_ack, call->ackr_first_seq) ||
+           before(prev_pkt, call->ackr_prev_seq))
                goto out;
        call->acks_latest_ts = skb->tstamp;
        call->acks_latest = sp->hdr.serial;
 
+       call->ackr_first_seq = first_soft_ack;
+       call->ackr_prev_seq = prev_pkt;
+
        /* Parse rwind and mtu sizes if provided. */
        if (buf.info.rxMTU)
                rxrpc_input_ackinfo(call, skb, &buf.info);
@@ -1155,19 +1161,19 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
  * handle data received on the local endpoint
  * - may be called in interrupt context
  *
- * The socket is locked by the caller and this prevents the socket from being
- * shut down and the local endpoint from going away, thus sk_user_data will not
- * be cleared until this function returns.
+ * [!] Note that as this is called from the encap_rcv hook, the socket is not
+ * held locked by the caller and nothing prevents sk_user_data on the UDP
+ * socket from being cleared in the middle of processing this function.
  *
  * Called with the RCU read lock held from the IP layer via UDP.
  */
 int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 {
+       struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
        struct rxrpc_connection *conn;
        struct rxrpc_channel *chan;
        struct rxrpc_call *call = NULL;
        struct rxrpc_skb_priv *sp;
-       struct rxrpc_local *local = udp_sk->sk_user_data;
        struct rxrpc_peer *peer = NULL;
        struct rxrpc_sock *rx = NULL;
        unsigned int channel;
@@ -1175,6 +1181,10 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 
        _enter("%p", udp_sk);
 
+       if (unlikely(!local)) {
+               kfree_skb(skb);
+               return 0;
+       }
        if (skb->tstamp == 0)
                skb->tstamp = ktime_get_real();
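[The ACK-discard fix above compares first_soft_ack and previousPacket with before(), rxrpc's wraparound-safe sequence comparison. That idiom interprets the unsigned difference as signed, so ordering survives 32-bit wrap; a standalone demonstration:]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* The idiom behind before(): a is "behind" b on the 32-bit circle iff
 * the signed interpretation of (a - b) is negative. */
static bool seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	printf("%d\n", seq_before(1, 2));		/* 1 */
	printf("%d\n", seq_before(0xfffffff0u, 5));	/* 1: 5 is ahead across the wrap */
	printf("%d\n", seq_before(5, 0xfffffff0u));	/* 0 */
	return 0;
}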
 
index 15cf42d5b53a56d8d19cabdc8c2b55156d73d28a..01959db51445ca00e6044d8a849e698c4ab17a33 100644 (file)
@@ -304,7 +304,8 @@ nomem:
        ret = -ENOMEM;
 sock_error:
        mutex_unlock(&rxnet->local_mutex);
-       kfree(local);
+       if (local)
+               call_rcu(&local->rcu, rxrpc_local_rcu);
        _leave(" = %d", ret);
        return ERR_PTR(ret);
 
index 736aa92811004cfe5d157abd4827710783f8d57c..004c762c2e8d063cfda32c0f93325fb779f08737 100644 (file)
@@ -335,7 +335,6 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
        struct kvec iov[2];
        rxrpc_serial_t serial;
        size_t len;
-       bool lost = false;
        int ret, opt;
 
        _enter(",{%d}", skb->len);
@@ -393,14 +392,14 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
                static int lose;
                if ((lose++ & 7) == 7) {
                        ret = 0;
-                       lost = true;
+                       trace_rxrpc_tx_data(call, sp->hdr.seq, serial,
+                                           whdr.flags, retrans, true);
+                       goto done;
                }
        }
 
-       trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
-                           retrans, lost);
-       if (lost)
-               goto done;
+       trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags, retrans,
+                           false);
 
        /* send the packet with the don't fragment bit set if we currently
         * think it's small enough */
index bc05af89fc381daa46d7cf8032c9900dfbcea65c..6e84d878053c7b8821483c0c1447a5c338d5fade 100644 (file)
@@ -157,6 +157,11 @@ void rxrpc_error_report(struct sock *sk)
 
        _enter("%p{%d}", sk, local->debug_id);
 
+       /* Clear the outstanding error value on the socket so that it doesn't
+        * cause kernel_sendmsg() to return it later.
+        */
+       sock_error(sk);
+
        skb = sock_dequeue_err_skb(sk);
        if (!skb) {
                _leave("UDP socket errqueue empty");
index 46c9312085b1ba81b4941607f751a07adb8f3c20..bec64deb7b0a2794345c896827846fa8bac57e19 100644 (file)
@@ -152,12 +152,13 @@ static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call,
 }
 
 /*
- * Queue a DATA packet for transmission, set the resend timeout and send the
- * packet immediately
+ * Queue a DATA packet for transmission, set the resend timeout and send
+ * the packet immediately.  Returns the error from rxrpc_send_data_packet()
+ * in case the caller wants to do something with it.
  */
-static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
-                              struct sk_buff *skb, bool last,
-                              rxrpc_notify_end_tx_t notify_end_tx)
+static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
+                             struct sk_buff *skb, bool last,
+                             rxrpc_notify_end_tx_t notify_end_tx)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        unsigned long now;
@@ -250,7 +251,8 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
 
 out:
        rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
-       _leave("");
+       _leave(" = %d", ret);
+       return ret;
 }
 
 /*
@@ -423,9 +425,10 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
                        if (ret < 0)
                                goto out;
 
-                       rxrpc_queue_packet(rx, call, skb,
-                                          !msg_data_left(msg) && !more,
-                                          notify_end_tx);
+                       ret = rxrpc_queue_packet(rx, call, skb,
+                                                !msg_data_left(msg) && !more,
+                                                notify_end_tx);
+                       /* Should check for failure here */
                        skb = NULL;
                }
        } while (msg_data_left(msg) > 0);
index 1b9afdee5ba976ba64200d8f85050cf053b7d65c..5c02ad97ef239a5eb22df8b22be80010a77b0151 100644 (file)
@@ -358,8 +358,7 @@ config NET_SCH_PIE
        help
          Say Y here if you want to use the Proportional Integral controller
          Enhanced scheduler packet scheduling algorithm.
-         For more information, please see
-         http://tools.ietf.org/html/draft-pan-tsvwg-pie-00
+         For more information, please see https://tools.ietf.org/html/rfc8033
 
          To compile this driver as a module, choose M here: the module
          will be called sch_pie.
index aecf1bf233c8362673812b5ab212f32e5f868a5b..5a87e271d35a2416b3589888bcfadee0c31b2142 100644 (file)
 #include <net/act_api.h>
 #include <net/netlink.h>
 
-static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp)
-{
-       u32 chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK;
-
-       if (!tp)
-               return -EINVAL;
-       a->goto_chain = tcf_chain_get_by_act(tp->chain->block, chain_index);
-       if (!a->goto_chain)
-               return -ENOMEM;
-       return 0;
-}
-
-static void tcf_action_goto_chain_fini(struct tc_action *a)
-{
-       tcf_chain_put_by_act(a->goto_chain);
-}
-
 static void tcf_action_goto_chain_exec(const struct tc_action *a,
                                       struct tcf_result *res)
 {
-       const struct tcf_chain *chain = a->goto_chain;
+       const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);
 
        res->goto_tp = rcu_dereference_bh(chain->filter_chain);
 }
@@ -71,6 +54,51 @@ static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
                call_rcu(&old->rcu, tcf_free_cookie_rcu);
 }
 
+int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
+                            struct tcf_chain **newchain,
+                            struct netlink_ext_ack *extack)
+{
+       int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
+       u32 chain_index;
+
+       if (!opcode)
+               ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
+       else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
+               ret = 0;
+       if (ret) {
+               NL_SET_ERR_MSG(extack, "invalid control action");
+               goto end;
+       }
+
+       if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
+               chain_index = action & TC_ACT_EXT_VAL_MASK;
+               if (!tp || !newchain) {
+                       ret = -EINVAL;
+                       NL_SET_ERR_MSG(extack,
+                                      "can't goto NULL proto/chain");
+                       goto end;
+               }
+               *newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
+               if (!*newchain) {
+                       ret = -ENOMEM;
+                       NL_SET_ERR_MSG(extack,
+                                      "can't allocate goto_chain");
+               }
+       }
+end:
+       return ret;
+}
+EXPORT_SYMBOL(tcf_action_check_ctrlact);
+
+struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
+                                        struct tcf_chain *goto_chain)
+{
+       a->tcfa_action = action;
+       rcu_swap_protected(a->goto_chain, goto_chain, 1);
+       return goto_chain;
+}
+EXPORT_SYMBOL(tcf_action_set_ctrlact);
+
 /* XXX: For standalone actions, we don't need a RCU grace period either, because
  * actions are always connected to filters and filters are already destroyed in
  * RCU callbacks, so after a RCU grace period actions are already disconnected
@@ -78,13 +106,15 @@ static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
  */
 static void free_tcf(struct tc_action *p)
 {
+       struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);
+
        free_percpu(p->cpu_bstats);
        free_percpu(p->cpu_bstats_hw);
        free_percpu(p->cpu_qstats);
 
        tcf_set_action_cookie(&p->act_cookie, NULL);
-       if (p->goto_chain)
-               tcf_action_goto_chain_fini(p);
+       if (chain)
+               tcf_chain_put_by_act(chain);
 
        kfree(p);
 }
@@ -654,6 +684,10 @@ repeat:
                                        return TC_ACT_OK;
                        }
                } else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
+                       if (unlikely(!rcu_access_pointer(a->goto_chain))) {
+                               net_warn_ratelimited("can't go to NULL chain!\n");
+                               return TC_ACT_SHOT;
+                       }
                        tcf_action_goto_chain_exec(a, res);
                }
 
@@ -800,15 +834,6 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
        return c;
 }
 
-static bool tcf_action_valid(int action)
-{
-       int opcode = TC_ACT_EXT_OPCODE(action);
-
-       if (!opcode)
-               return action <= TC_ACT_VALUE_MAX;
-       return opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC;
-}
-
 struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
                                    struct nlattr *nla, struct nlattr *est,
                                    char *name, int ovr, int bind,
@@ -890,10 +915,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        /* backward compatibility for policer */
        if (name == NULL)
                err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
-                               rtnl_held, extack);
+                               rtnl_held, tp, extack);
        else
                err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
-                               extack);
+                               tp, extack);
        if (err < 0)
                goto err_mod;
 
@@ -907,18 +932,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        if (err != ACT_P_CREATED)
                module_put(a_o->owner);
 
-       if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
-               err = tcf_action_goto_chain_init(a, tp);
-               if (err) {
-                       tcf_action_destroy_1(a, bind);
-                       NL_SET_ERR_MSG(extack, "Failed to init TC action chain");
-                       return ERR_PTR(err);
-               }
-       }
-
-       if (!tcf_action_valid(a->tcfa_action)) {
+       if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN) &&
+           !rcu_access_pointer(a->goto_chain)) {
                tcf_action_destroy_1(a, bind);
-               NL_SET_ERR_MSG(extack, "Invalid control action value");
+               NL_SET_ERR_MSG(extack, "can't use goto chain with NULL chain");
                return ERR_PTR(-EINVAL);
        }
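[tcf_action_set_ctrlact() in the hunks above publishes the new goto_chain pointer and hands the old one back, so the caller can release it outside the critical section. A minimal user-space sketch of that swap-and-return-old ownership transfer; in the kernel the swap is rcu_swap_protected(), here it is a plain pointer swap and the struct is hypothetical:]

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct chain {
	char name[16];
};

/* Publish the new pointer, return the old one; the caller drops the
 * old reference once it is safely out of the update path. */
static struct chain *set_chain(struct chain **slot, struct chain *new)
{
	struct chain *old = *slot;

	*slot = new;
	return old;
}

int main(void)
{
	struct chain *cur = NULL;
	struct chain *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

	if (!a || !b)
		return 1;
	strcpy(a->name, "chain-a");
	strcpy(b->name, "chain-b");

	free(set_chain(&cur, a));	/* old is NULL, free(NULL) is a no-op */
	struct chain *old = set_chain(&cur, b);
	printf("replaced %s with %s\n", old->name, cur->name);
	free(old);
	free(cur);
	return 0;
}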
 
index aa5c38d11a3079644d36329c8b4637e0bdfaa5c6..3841156aa09f778c285765b51342cd5d218a34ac 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_bpf.h>
 #include <net/tc_act/tc_bpf.h>
@@ -278,10 +279,11 @@ static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
 static int tcf_bpf_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **act,
                        int replace, int bind, bool rtnl_held,
-                       struct netlink_ext_ack *extack)
+                       struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, bpf_net_id);
        struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tcf_bpf_cfg cfg, old;
        struct tc_act_bpf *parm;
        struct tcf_bpf *prog;
@@ -323,12 +325,16 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
                return ret;
        }
 
+       ret = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (ret < 0)
+               goto release_idr;
+
        is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
        is_ebpf = tb[TCA_ACT_BPF_FD];
 
        if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
                ret = -EINVAL;
-               goto out;
+               goto put_chain;
        }
 
        memset(&cfg, 0, sizeof(cfg));
@@ -336,7 +342,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
        ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
                       tcf_bpf_init_from_efd(tb, &cfg);
        if (ret < 0)
-               goto out;
+               goto put_chain;
 
        prog = to_bpf(*act);
 
@@ -350,10 +356,13 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
        if (cfg.bpf_num_ops)
                prog->bpf_num_ops = cfg.bpf_num_ops;
 
-       prog->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*act, parm->action, goto_ch);
        rcu_assign_pointer(prog->filter, cfg.filter);
        spin_unlock_bh(&prog->tcf_lock);
 
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+
        if (res == ACT_P_CREATED) {
                tcf_idr_insert(tn, *act);
        } else {
@@ -363,9 +372,13 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
        }
 
        return res;
-out:
-       tcf_idr_release(*act, bind);
 
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+
+release_idr:
+       tcf_idr_release(*act, bind);
        return ret;
 }
 
index 5d24993cccfebead613c7dd15bb41a1087c8024e..32ae0cd6e31c67e36793081ac11371c2250eb0f1 100644 (file)
@@ -21,6 +21,7 @@
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 #include <net/act_api.h>
+#include <net/pkt_cls.h>
 #include <uapi/linux/tc_act/tc_connmark.h>
 #include <net/tc_act/tc_connmark.h>
 
@@ -97,13 +98,15 @@ static const struct nla_policy connmark_policy[TCA_CONNMARK_MAX + 1] = {
 static int tcf_connmark_init(struct net *net, struct nlattr *nla,
                             struct nlattr *est, struct tc_action **a,
                             int ovr, int bind, bool rtnl_held,
+                            struct tcf_proto *tp,
                             struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, connmark_net_id);
        struct nlattr *tb[TCA_CONNMARK_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tcf_connmark_info *ci;
        struct tc_connmark *parm;
-       int ret = 0;
+       int ret = 0, err;
 
        if (!nla)
                return -EINVAL;
@@ -128,7 +131,11 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
                }
 
                ci = to_connmark(*a);
-               ci->tcf_action = parm->action;
+               err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch,
+                                              extack);
+               if (err < 0)
+                       goto release_idr;
+               tcf_action_set_ctrlact(*a, parm->action, goto_ch);
                ci->net = net;
                ci->zone = parm->zone;
 
@@ -142,15 +149,24 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
                        tcf_idr_release(*a, bind);
                        return -EEXIST;
                }
+               err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch,
+                                              extack);
+               if (err < 0)
+                       goto release_idr;
                /* replacing action and zone */
                spin_lock_bh(&ci->tcf_lock);
-               ci->tcf_action = parm->action;
+               goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
                ci->zone = parm->zone;
                spin_unlock_bh(&ci->tcf_lock);
+               if (goto_ch)
+                       tcf_chain_put_by_act(goto_ch);
                ret = 0;
        }
 
        return ret;
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
index c79aca29505e33a44b7402b3ccc1ccaf8827b280..0c77e7bdf6d5d282965eb3970f3b8934e570e0d4 100644 (file)
@@ -33,6 +33,7 @@
 #include <net/sctp/checksum.h>
 
 #include <net/act_api.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_csum.h>
 #include <net/tc_act/tc_csum.h>
@@ -46,12 +47,13 @@ static struct tc_action_ops act_csum_ops;
 
 static int tcf_csum_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a, int ovr,
-                        int bind, bool rtnl_held,
+                        int bind, bool rtnl_held, struct tcf_proto *tp,
                         struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, csum_net_id);
        struct tcf_csum_params *params_new;
        struct nlattr *tb[TCA_CSUM_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_csum *parm;
        struct tcf_csum *p;
        int ret = 0, err;
@@ -87,21 +89,27 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
                return err;
        }
 
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
+
        p = to_tcf_csum(*a);
 
        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
        if (unlikely(!params_new)) {
-               tcf_idr_release(*a, bind);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto put_chain;
        }
        params_new->update_flags = parm->update_flags;
 
        spin_lock_bh(&p->tcf_lock);
-       p->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        rcu_swap_protected(p->params, params_new,
                           lockdep_is_held(&p->tcf_lock));
        spin_unlock_bh(&p->tcf_lock);
 
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
        if (params_new)
                kfree_rcu(params_new, rcu);
 
@@ -109,6 +117,12 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
                tcf_idr_insert(tn, *a);
 
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 /**
index 93da0004e9f415e9eb8439eeacdc0ae73f5783fd..e540e31069d746106eb82c5ea3c2f99f9438cbf4 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 #include <linux/tc_act/tc_gact.h>
 #include <net/tc_act/tc_gact.h>
 
@@ -57,10 +58,11 @@ static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
 static int tcf_gact_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
                         int ovr, int bind, bool rtnl_held,
-                        struct netlink_ext_ack *extack)
+                        struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, gact_net_id);
        struct nlattr *tb[TCA_GACT_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_gact *parm;
        struct tcf_gact *gact;
        int ret = 0;
@@ -116,10 +118,13 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
                return err;
        }
 
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
        gact = to_gact(*a);
 
        spin_lock_bh(&gact->tcf_lock);
-       gact->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
 #ifdef CONFIG_GACT_PROB
        if (p_parm) {
                gact->tcfg_paction = p_parm->paction;
@@ -133,9 +138,15 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
 #endif
        spin_unlock_bh(&gact->tcf_lock);
 
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a,
index 9b1f2b3990eedeeca44ba6ad5c7e334043727dd1..31c6ffb6abe7c607972de7b80fe77f03f8782723 100644 (file)
@@ -29,6 +29,7 @@
 #include <net/net_namespace.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 #include <uapi/linux/tc_act/tc_ife.h>
 #include <net/tc_act/tc_ife.h>
 #include <linux/etherdevice.h>
@@ -469,11 +470,12 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
 static int tcf_ife_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **a,
                        int ovr, int bind, bool rtnl_held,
-                       struct netlink_ext_ack *extack)
+                       struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, ife_net_id);
        struct nlattr *tb[TCA_IFE_MAX + 1];
        struct nlattr *tb2[IFE_META_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tcf_ife_params *p;
        struct tcf_ife_info *ife;
        u16 ife_type = ETH_P_IFE;
@@ -531,6 +533,10 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
        }
 
        ife = to_ife(*a);
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
+
        p->flags = parm->flags;
 
        if (parm->flags & IFE_ENCODE) {
@@ -563,13 +569,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
        if (tb[TCA_IFE_METALST]) {
                err = nla_parse_nested(tb2, IFE_META_MAX, tb[TCA_IFE_METALST],
                                       NULL, NULL);
-               if (err) {
-metadata_parse_err:
-                       tcf_idr_release(*a, bind);
-                       kfree(p);
-                       return err;
-               }
-
+               if (err)
+                       goto metadata_parse_err;
                err = populate_metalist(ife, tb2, exists, rtnl_held);
                if (err)
                        goto metadata_parse_err;
@@ -581,21 +582,20 @@ metadata_parse_err:
                 * going to bail out
                 */
                err = use_all_metadata(ife, exists);
-               if (err) {
-                       tcf_idr_release(*a, bind);
-                       kfree(p);
-                       return err;
-               }
+               if (err)
+                       goto metadata_parse_err;
        }
 
        if (exists)
                spin_lock_bh(&ife->tcf_lock);
-       ife->tcf_action = parm->action;
        /* protected by tcf_lock when modifying existing action */
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        rcu_swap_protected(ife->params, p, 1);
 
        if (exists)
                spin_unlock_bh(&ife->tcf_lock);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
        if (p)
                kfree_rcu(p, rcu);
 
@@ -603,6 +603,13 @@ metadata_parse_err:
                tcf_idr_insert(tn, *a);
 
        return ret;
+metadata_parse_err:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       kfree(p);
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
index 98f5b6ea77b46ea7a55e1c325fd60542020b2165..04a0b5c611943a4e10bfa0855f1c3928e4e141de 100644 (file)
@@ -97,7 +97,8 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
 
 static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
                          struct nlattr *est, struct tc_action **a,
-                         const struct tc_action_ops *ops, int ovr, int bind)
+                         const struct tc_action_ops *ops, int ovr, int bind,
+                         struct tcf_proto *tp)
 {
        struct tc_action_net *tn = net_generic(net, id);
        struct nlattr *tb[TCA_IPT_MAX + 1];
@@ -205,20 +206,20 @@ err1:
 
 static int tcf_ipt_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **a, int ovr,
-                       int bind, bool rtnl_held,
+                       int bind, bool rtnl_held, struct tcf_proto *tp,
                        struct netlink_ext_ack *extack)
 {
        return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
-                             bind);
+                             bind, tp);
 }
 
 static int tcf_xt_init(struct net *net, struct nlattr *nla,
                       struct nlattr *est, struct tc_action **a, int ovr,
-                      int bind, bool unlocked,
+                      int bind, bool unlocked, struct tcf_proto *tp,
                       struct netlink_ext_ack *extack)
 {
        return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
-                             bind);
+                             bind, tp);
 }
 
 static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a,
index 6692fd0546177347a70123a959156d1120eace90..17cc6bd4c57c3a6f12786c3d1109c6e48af185dd 100644 (file)
@@ -94,10 +94,12 @@ static struct tc_action_ops act_mirred_ops;
 static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
                           int ovr, int bind, bool rtnl_held,
+                          struct tcf_proto *tp,
                           struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, mirred_net_id);
        struct nlattr *tb[TCA_MIRRED_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        bool mac_header_xmit = false;
        struct tc_mirred *parm;
        struct tcf_mirred *m;
@@ -157,18 +159,23 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }
+
        m = to_mirred(*a);
+       if (ret == ACT_P_CREATED)
+               INIT_LIST_HEAD(&m->tcfm_list);
+
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
 
        spin_lock_bh(&m->tcf_lock);
-       m->tcf_action = parm->action;
-       m->tcfm_eaction = parm->eaction;
 
        if (parm->ifindex) {
                dev = dev_get_by_index(net, parm->ifindex);
                if (!dev) {
                        spin_unlock_bh(&m->tcf_lock);
-                       tcf_idr_release(*a, bind);
-                       return -ENODEV;
+                       err = -ENODEV;
+                       goto put_chain;
                }
                mac_header_xmit = dev_is_mac_header_xmit(dev);
                rcu_swap_protected(m->tcfm_dev, dev,
@@ -177,7 +184,11 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                        dev_put(dev);
                m->tcfm_mac_header_xmit = mac_header_xmit;
        }
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
+       m->tcfm_eaction = parm->eaction;
        spin_unlock_bh(&m->tcf_lock);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 
        if (ret == ACT_P_CREATED) {
                spin_lock(&mirred_list_lock);
@@ -188,6 +199,12 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        }
 
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
index 543eab9193f17ca94756bff060fee937078a1e21..e91bb8eb81ec5e2e7dfe86d2832cdb6530f8e327 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/string.h>
 #include <linux/tc_act/tc_nat.h>
 #include <net/act_api.h>
+#include <net/pkt_cls.h>
 #include <net/icmp.h>
 #include <net/ip.h>
 #include <net/netlink.h>
@@ -38,10 +39,12 @@ static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
 
 static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
                        struct tc_action **a, int ovr, int bind,
-                       bool rtnl_held, struct netlink_ext_ack *extack)
+                       bool rtnl_held, struct tcf_proto *tp,
+                       struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, nat_net_id);
        struct nlattr *tb[TCA_NAT_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_nat *parm;
        int ret = 0, err;
        struct tcf_nat *p;
@@ -76,6 +79,9 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
        } else {
                return err;
        }
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
        p = to_tcf_nat(*a);
 
        spin_lock_bh(&p->tcf_lock);
@@ -84,13 +90,18 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
        p->mask = parm->mask;
        p->flags = parm->flags;
 
-       p->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        spin_unlock_bh(&p->tcf_lock);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
 
        return ret;
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static int tcf_nat_act(struct sk_buff *skb, const struct tc_action *a,
index a80373878df769d180a6c34a9c6fdda4e846efd8..287793abfaf9bae9aba9c4f23552890c795010c1 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/tc_act/tc_pedit.h>
 #include <net/tc_act/tc_pedit.h>
 #include <uapi/linux/tc_act/tc_pedit.h>
+#include <net/pkt_cls.h>
 
 static unsigned int pedit_net_id;
 static struct tc_action_ops act_pedit_ops;
@@ -138,10 +139,11 @@ nla_failure:
 static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                          struct nlattr *est, struct tc_action **a,
                          int ovr, int bind, bool rtnl_held,
-                         struct netlink_ext_ack *extack)
+                         struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, pedit_net_id);
        struct nlattr *tb[TCA_PEDIT_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_pedit_key *keys = NULL;
        struct tcf_pedit_key_ex *keys_ex;
        struct tc_pedit *parm;
@@ -205,6 +207,11 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                goto out_free;
        }
 
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0) {
+               ret = err;
+               goto out_release;
+       }
        p = to_pedit(*a);
        spin_lock_bh(&p->tcf_lock);
 
@@ -214,7 +221,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                if (!keys) {
                        spin_unlock_bh(&p->tcf_lock);
                        ret = -ENOMEM;
-                       goto out_release;
+                       goto put_chain;
                }
                kfree(p->tcfp_keys);
                p->tcfp_keys = keys;
@@ -223,16 +230,21 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
        memcpy(p->tcfp_keys, parm->keys, ksize);
 
        p->tcfp_flags = parm->flags;
-       p->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
 
        kfree(p->tcfp_keys_ex);
        p->tcfp_keys_ex = keys_ex;
 
        spin_unlock_bh(&p->tcf_lock);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
 
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 out_release:
        tcf_idr_release(*a, bind);
 out_free:
index 8271a6263824bf53aaa92f97a35916d6d31aa244..2b8581f6ab510100e66fc3e2445ed6460ff65323 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <net/act_api.h>
 #include <net/netlink.h>
+#include <net/pkt_cls.h>
 
 struct tcf_police_params {
        int                     tcfp_result;
@@ -83,10 +84,12 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
 static int tcf_police_init(struct net *net, struct nlattr *nla,
                               struct nlattr *est, struct tc_action **a,
                               int ovr, int bind, bool rtnl_held,
+                              struct tcf_proto *tp,
                               struct netlink_ext_ack *extack)
 {
        int ret = 0, tcfp_result = TC_ACT_OK, err, size;
        struct nlattr *tb[TCA_POLICE_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_police *parm;
        struct tcf_police *police;
        struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
@@ -128,6 +131,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
 
        police = to_police(*a);
        if (parm->rate.rate) {
@@ -213,12 +219,14 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
        if (new->peak_present)
                police->tcfp_ptoks = new->tcfp_mtu_ptoks;
        spin_unlock_bh(&police->tcfp_lock);
-       police->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        rcu_swap_protected(police->params,
                           new,
                           lockdep_is_held(&police->tcf_lock));
        spin_unlock_bh(&police->tcf_lock);
 
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
        if (new)
                kfree_rcu(new, rcu);
 
@@ -229,6 +237,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
 failure:
        qdisc_put_rtab(P_tab);
        qdisc_put_rtab(R_tab);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
        tcf_idr_release(*a, bind);
        return err;
 }
index 203e399e5c85a293b29d52a84f31bff929827cf4..0f82d50ea23245be1ce34fcce1cdb4a048c1af17 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/tc_act/tc_sample.h>
 #include <net/tc_act/tc_sample.h>
 #include <net/psample.h>
+#include <net/pkt_cls.h>
 
 #include <linux/if_arp.h>
 
@@ -37,14 +38,15 @@ static const struct nla_policy sample_policy[TCA_SAMPLE_MAX + 1] = {
 
 static int tcf_sample_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a, int ovr,
-                          int bind, bool rtnl_held,
+                          int bind, bool rtnl_held, struct tcf_proto *tp,
                           struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, sample_net_id);
        struct nlattr *tb[TCA_SAMPLE_MAX + 1];
        struct psample_group *psample_group;
+       struct tcf_chain *goto_ch = NULL;
+       u32 psample_group_num, rate;
        struct tc_sample *parm;
-       u32 psample_group_num;
        struct tcf_sample *s;
        bool exists = false;
        int ret, err;
@@ -79,19 +81,28 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
 
+       rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
+       if (!rate) {
+               NL_SET_ERR_MSG(extack, "invalid sample rate");
+               err = -EINVAL;
+               goto put_chain;
+       }
        psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
        psample_group = psample_group_get(net, psample_group_num);
        if (!psample_group) {
-               tcf_idr_release(*a, bind);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto put_chain;
        }
 
        s = to_sample(*a);
 
        spin_lock_bh(&s->tcf_lock);
-       s->tcf_action = parm->action;
-       s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
+       s->rate = rate;
        s->psample_group_num = psample_group_num;
        RCU_INIT_POINTER(s->psample_group, psample_group);
 
@@ -100,10 +111,18 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
                s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
        }
        spin_unlock_bh(&s->tcf_lock);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static void tcf_sample_cleanup(struct tc_action *a)
index d54cb608dbafae7ea9a333bd6a117824f2a1caac..23c8ca5615e59b85d22bacda25a22cc02e54d9f6 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/rtnetlink.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_defact.h>
 #include <net/tc_act/tc_defact.h>
@@ -60,14 +61,26 @@ static int alloc_defdata(struct tcf_defact *d, const struct nlattr *defdata)
        return 0;
 }
 
-static void reset_policy(struct tcf_defact *d, const struct nlattr *defdata,
-                        struct tc_defact *p)
+static int reset_policy(struct tc_action *a, const struct nlattr *defdata,
+                       struct tc_defact *p, struct tcf_proto *tp,
+                       struct netlink_ext_ack *extack)
 {
+       struct tcf_chain *goto_ch = NULL;
+       struct tcf_defact *d;
+       int err;
+
+       err = tcf_action_check_ctrlact(p->action, tp, &goto_ch, extack);
+       if (err < 0)
+               return err;
+       d = to_defact(a);
        spin_lock_bh(&d->tcf_lock);
-       d->tcf_action = p->action;
+       goto_ch = tcf_action_set_ctrlact(a, p->action, goto_ch);
        memset(d->tcfd_defdata, 0, SIMP_MAX_DATA);
        nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
        spin_unlock_bh(&d->tcf_lock);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+       return 0;
 }
 
 static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = {
@@ -78,10 +91,11 @@ static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = {
 static int tcf_simp_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
                         int ovr, int bind, bool rtnl_held,
-                        struct netlink_ext_ack *extack)
+                        struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, simp_net_id);
        struct nlattr *tb[TCA_DEF_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_defact *parm;
        struct tcf_defact *d;
        bool exists = false;
@@ -122,27 +136,37 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
                }
 
                d = to_defact(*a);
-               ret = alloc_defdata(d, tb[TCA_DEF_DATA]);
-               if (ret < 0) {
-                       tcf_idr_release(*a, bind);
-                       return ret;
-               }
-               d->tcf_action = parm->action;
+               err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch,
+                                              extack);
+               if (err < 0)
+                       goto release_idr;
+
+               err = alloc_defdata(d, tb[TCA_DEF_DATA]);
+               if (err < 0)
+                       goto put_chain;
+
+               tcf_action_set_ctrlact(*a, parm->action, goto_ch);
                ret = ACT_P_CREATED;
        } else {
-               d = to_defact(*a);
-
                if (!ovr) {
-                       tcf_idr_release(*a, bind);
-                       return -EEXIST;
+                       err = -EEXIST;
+                       goto release_idr;
                }
 
-               reset_policy(d, tb[TCA_DEF_DATA], parm);
+               err = reset_policy(*a, tb[TCA_DEF_DATA], parm, tp, extack);
+               if (err)
+                       goto release_idr;
        }
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
index 65879500b688bca58c451b017928f22994fa9d82..7e1d261a31d2e73460f5d8b2d1d7869f18a1ea24 100644 (file)
@@ -26,6 +26,7 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/dsfield.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_skbedit.h>
 #include <net/tc_act/tc_skbedit.h>
@@ -96,11 +97,13 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
 static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                            struct nlattr *est, struct tc_action **a,
                            int ovr, int bind, bool rtnl_held,
+                           struct tcf_proto *tp,
                            struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, skbedit_net_id);
        struct tcf_skbedit_params *params_new;
        struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_skbedit *parm;
        struct tcf_skbedit *d;
        u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
@@ -186,11 +189,14 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                        return -EEXIST;
                }
        }
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
 
        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
        if (unlikely(!params_new)) {
-               tcf_idr_release(*a, bind);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto put_chain;
        }
 
        params_new->flags = flags;
@@ -208,16 +214,24 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                params_new->mask = *mask;
 
        spin_lock_bh(&d->tcf_lock);
-       d->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        rcu_swap_protected(d->params, params_new,
                           lockdep_is_held(&d->tcf_lock));
        spin_unlock_bh(&d->tcf_lock);
        if (params_new)
                kfree_rcu(params_new, rcu);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
index 7bac1d78e7a39994ccb9bea33fd3aa0fd7030a7e..1d4c324d0a42bd2cd707a987a5595f3119f66155 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/rtnetlink.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_skbmod.h>
 #include <net/tc_act/tc_skbmod.h>
@@ -82,11 +83,13 @@ static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = {
 static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
                           int ovr, int bind, bool rtnl_held,
+                          struct tcf_proto *tp,
                           struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, skbmod_net_id);
        struct nlattr *tb[TCA_SKBMOD_MAX + 1];
        struct tcf_skbmod_params *p, *p_old;
+       struct tcf_chain *goto_ch = NULL;
        struct tc_skbmod *parm;
        struct tcf_skbmod *d;
        bool exists = false;
@@ -153,21 +156,24 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
 
        d = to_skbmod(*a);
 
        p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
        if (unlikely(!p)) {
-               tcf_idr_release(*a, bind);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto put_chain;
        }
 
        p->flags = lflags;
-       d->tcf_action = parm->action;
 
        if (ovr)
                spin_lock_bh(&d->tcf_lock);
        /* Protected by tcf_lock if overwriting existing action. */
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        p_old = rcu_dereference_protected(d->skbmod_p, 1);
 
        if (lflags & SKBMOD_F_DMAC)
@@ -183,10 +189,18 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
 
        if (p_old)
                kfree_rcu(p_old, rcu);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static void tcf_skbmod_cleanup(struct tc_action *a)
index 7c6591b991d510318f0eba8ec8036f62d8019a1d..d5aaf90a39712982685cbed5f60d16576324f17b 100644 (file)
@@ -17,6 +17,7 @@
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 #include <net/dst.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_tunnel_key.h>
 #include <net/tc_act/tc_tunnel_key.h>
@@ -210,12 +211,14 @@ static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
 static int tunnel_key_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
                           int ovr, int bind, bool rtnl_held,
+                          struct tcf_proto *tp,
                           struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
        struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
        struct tcf_tunnel_key_params *params_new;
        struct metadata_dst *metadata = NULL;
+       struct tcf_chain *goto_ch = NULL;
        struct tc_tunnel_key *parm;
        struct tcf_tunnel_key *t;
        bool exists = false;
@@ -359,6 +362,12 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
                goto release_tun_meta;
        }
 
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0) {
+               ret = err;
+               exists = true;
+               goto release_tun_meta;
+       }
        t = to_tunnel_key(*a);
 
        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
@@ -366,23 +375,29 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
                NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
                ret = -ENOMEM;
                exists = true;
-               goto release_tun_meta;
+               goto put_chain;
        }
        params_new->tcft_action = parm->t_action;
        params_new->tcft_enc_metadata = metadata;
 
        spin_lock_bh(&t->tcf_lock);
-       t->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        rcu_swap_protected(t->params, params_new,
                           lockdep_is_held(&t->tcf_lock));
        spin_unlock_bh(&t->tcf_lock);
        tunnel_key_release_params(params_new);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
 
        return ret;
 
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+
 release_tun_meta:
        if (metadata)
                dst_release(&metadata->dst);
index ac0061599225b6871d846dce30dd83ae37abff25..0f40d0a74423b8d91bf8bb8838eb382846710851 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/if_vlan.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_vlan.h>
 #include <net/tc_act/tc_vlan.h>
@@ -105,10 +106,11 @@ static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
 static int tcf_vlan_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
                         int ovr, int bind, bool rtnl_held,
-                        struct netlink_ext_ack *extack)
+                        struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, vlan_net_id);
        struct nlattr *tb[TCA_VLAN_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tcf_vlan_params *p;
        struct tc_vlan *parm;
        struct tcf_vlan *v;
@@ -200,12 +202,16 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
                return -EEXIST;
        }
 
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
+
        v = to_vlan(*a);
 
        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p) {
-               tcf_idr_release(*a, bind);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto put_chain;
        }
 
        p->tcfv_action = action;
@@ -214,16 +220,24 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
        p->tcfv_push_proto = push_proto;
 
        spin_lock_bh(&v->tcf_lock);
-       v->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        rcu_swap_protected(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
        spin_unlock_bh(&v->tcf_lock);
 
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
        if (p)
                kfree_rcu(p, rcu);
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static void tcf_vlan_cleanup(struct tc_action *a)
index dc10525e90e7073563f9a3b220fc092323946a19..99ae30c177c76783dae71bf7955f4d4d0bb3b639 100644 (file)
@@ -367,7 +367,7 @@ static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
        struct tcf_block *block = chain->block;
 
        mutex_destroy(&chain->filter_chain_lock);
-       kfree(chain);
+       kfree_rcu(chain, rcu);
        if (free_block)
                tcf_block_destroy(block);
 }
index 459921bd3d87b5a563d3725015f8765b20566aa0..a13bc351a4148f40f434b25a9adffc4cf9137548 100644 (file)
@@ -130,6 +130,11 @@ static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
 
 static void *mall_get(struct tcf_proto *tp, u32 handle)
 {
+       struct cls_mall_head *head = rtnl_dereference(tp->root);
+
+       if (head && head->handle == handle)
+               return head;
+
        return NULL;
 }
 
index 1d2a12132abcccdea4a237a9b06bb9296ff28f0b..259d97bc2abd39df8df646c2ebc34ea272e1fd70 100644 (file)
@@ -211,6 +211,9 @@ struct cake_sched_data {
        u8              ack_filter;
        u8              atm_mode;
 
+       u32             fwmark_mask;
+       u16             fwmark_shft;
+
        /* time_next = time_this + ((len * rate_ns) >> rate_shft) */
        u16             rate_shft;
        ktime_t         time_next_packet;
@@ -258,8 +261,7 @@ enum {
        CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
        CAKE_FLAG_INGRESS          = BIT(2),
        CAKE_FLAG_WASH             = BIT(3),
-       CAKE_FLAG_SPLIT_GSO        = BIT(4),
-       CAKE_FLAG_FWMARK           = BIT(5)
+       CAKE_FLAG_SPLIT_GSO        = BIT(4)
 };
 
 /* COBALT operates the Codel and BLUE algorithms in parallel, in order to
@@ -1515,16 +1517,27 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
 
 static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
 {
+       int wlen = skb_network_offset(skb);
        u8 dscp;
 
-       switch (skb->protocol) {
+       switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
+               wlen += sizeof(struct iphdr);
+               if (!pskb_may_pull(skb, wlen) ||
+                   skb_try_make_writable(skb, wlen))
+                       return 0;
+
                dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
                if (wash && dscp)
                        ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
                return dscp;
 
        case htons(ETH_P_IPV6):
+               wlen += sizeof(struct ipv6hdr);
+               if (!pskb_may_pull(skb, wlen) ||
+                   skb_try_make_writable(skb, wlen))
+                       return 0;
+
                dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
                if (wash && dscp)
                        ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
@@ -1543,7 +1556,7 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
                                             struct sk_buff *skb)
 {
        struct cake_sched_data *q = qdisc_priv(sch);
-       u32 tin;
+       u32 tin, mark;
        u8 dscp;
 
        /* Tin selection: Default to diffserv-based selection, allow overriding
@@ -1551,14 +1564,13 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
         */
        dscp = cake_handle_diffserv(skb,
                                    q->rate_flags & CAKE_FLAG_WASH);
+       mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
 
        if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
                tin = 0;
 
-       else if (q->rate_flags & CAKE_FLAG_FWMARK && /* use fw mark */
-                skb->mark &&
-                skb->mark <= q->tin_cnt)
-               tin = q->tin_order[skb->mark - 1];
+       else if (mark && mark <= q->tin_cnt)
+               tin = q->tin_order[mark - 1];
 
        else if (TC_H_MAJ(skb->priority) == sch->handle &&
                 TC_H_MIN(skb->priority) > 0 &&
@@ -2172,6 +2184,7 @@ static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = {
        [TCA_CAKE_MPU]           = { .type = NLA_U32 },
        [TCA_CAKE_INGRESS]       = { .type = NLA_U32 },
        [TCA_CAKE_ACK_FILTER]    = { .type = NLA_U32 },
+       [TCA_CAKE_FWMARK]        = { .type = NLA_U32 },
 };
 
 static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
@@ -2619,10 +2632,8 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
        }
 
        if (tb[TCA_CAKE_FWMARK]) {
-               if (!!nla_get_u32(tb[TCA_CAKE_FWMARK]))
-                       q->rate_flags |= CAKE_FLAG_FWMARK;
-               else
-                       q->rate_flags &= ~CAKE_FLAG_FWMARK;
+               q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]);
+               q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0;
        }
 
        if (q->tins) {
@@ -2784,8 +2795,7 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
                        !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
                goto nla_put_failure;
 
-       if (nla_put_u32(skb, TCA_CAKE_FWMARK,
-                       !!(q->rate_flags & CAKE_FLAG_FWMARK)))
+       if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask))
                goto nla_put_failure;
 
        return nla_nest_end(skb, opts);
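
The fwmark handling above replaces the old boolean CAKE_FLAG_FWMARK with a user-supplied mask: fwmark_shft is the bit position of the mask's lowest set bit (__ffs()), so the masked firewall mark is normalized down to a small tin index before the tin_order[] lookup in cake_select_tin(). A worked example with a hypothetical mask:

	/* Suppose fwmark_mask = 0x0F00, so fwmark_shft = __ffs(0x0F00) = 8.
	 *
	 *   skb->mark = 0x0300: mark = (0x0300 & 0x0F00) >> 8 = 3
	 *                       -> tin = tin_order[3 - 1]
	 *   skb->mark = 0x00FF: mark = (0x00FF & 0x0F00) >> 8 = 0
	 *                       -> falls through to diffserv/priority selection
	 */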
index 4dc05409e3fb2742c1af9467aae5d1bf221b7101..114b9048ea7e3682106c6e65644d4d0992e20461 100644 (file)
@@ -1358,9 +1358,11 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)arg;
+       __u32 qlen;
 
        cl->xstats.avgidle = cl->avgidle;
        cl->xstats.undertime = 0;
+       qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);
 
        if (cl->undertime != PSCHED_PASTPERFECT)
                cl->xstats.undertime = cl->undertime - q->now;
@@ -1368,7 +1370,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
+           gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
                return -1;
 
        return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
@@ -1665,17 +1667,13 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)arg;
-       unsigned int qlen, backlog;
 
        if (cl->filters || cl->children || cl == &q->link)
                return -EBUSY;
 
        sch_tree_lock(sch);
 
-       qlen = cl->q->q.qlen;
-       backlog = cl->q->qstats.backlog;
-       qdisc_reset(cl->q);
-       qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
+       qdisc_purge_queue(cl->q);
 
        if (cl->next_alive)
                cbq_deactivate_class(cl);
index 09b8009910657ace91e838eccfa520c81d800750..430df9a55ec4e9742786fb869ab3acf28e84f5ed 100644 (file)
@@ -50,15 +50,6 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
        return container_of(clc, struct drr_class, common);
 }
 
-static void drr_purge_queue(struct drr_class *cl)
-{
-       unsigned int len = cl->qdisc->q.qlen;
-       unsigned int backlog = cl->qdisc->qstats.backlog;
-
-       qdisc_reset(cl->qdisc);
-       qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
        [TCA_DRR_QUANTUM]       = { .type = NLA_U32 },
 };
@@ -167,7 +158,7 @@ static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
 
        sch_tree_lock(sch);
 
-       drr_purge_queue(cl);
+       qdisc_purge_queue(cl->qdisc);
        qdisc_class_hash_remove(&q->clhash, &cl->common);
 
        sch_tree_unlock(sch);
@@ -269,7 +260,8 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
                                struct gnet_dump *d)
 {
        struct drr_class *cl = (struct drr_class *)arg;
-       __u32 qlen = cl->qdisc->q.qlen;
+       __u32 qlen = qdisc_qlen_sum(cl->qdisc);
+       struct Qdisc *cl_q = cl->qdisc;
        struct tc_drr_stats xstats;
 
        memset(&xstats, 0, sizeof(xstats));
@@ -279,7 +271,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
+           gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
                return -1;
 
        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
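
The scheduler hunks above and below all drop open-coded qlen/backlog bookkeeping in favour of helpers whose bodies are not part of this diff: qdisc_qstats_qlen_backlog(), qdisc_tree_flush_backlog() and qdisc_purge_queue(), plus qdisc_qstats_copy() on the dump paths. A sketch consistent with the open-coded versions removed here (drr_purge_queue() above, the reset/reduce pairs elsewhere), on the assumption that the helpers also fold in per-CPU queue statistics:

	static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
	{
		__u32 qlen, backlog;

		qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
		qdisc_tree_reduce_backlog(sch, qlen, backlog);
	}

	static inline void qdisc_purge_queue(struct Qdisc *sch)
	{
		__u32 qlen, backlog;

		qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
		qdisc_reset(sch);
		qdisc_tree_reduce_backlog(sch, qlen, backlog);
	}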
index 24cc220a3218aee4f6c44ed271050c0b8d137ec9..d2ab463f22ae8b122ae43d1969cf795fb11c05b3 100644 (file)
@@ -844,16 +844,6 @@ qdisc_peek_len(struct Qdisc *sch)
        return len;
 }
 
-static void
-hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
-{
-       unsigned int len = cl->qdisc->q.qlen;
-       unsigned int backlog = cl->qdisc->qstats.backlog;
-
-       qdisc_reset(cl->qdisc);
-       qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static void
 hfsc_adjust_levels(struct hfsc_class *cl)
 {
@@ -1076,7 +1066,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
        list_add_tail(&cl->siblings, &parent->children);
        if (parent->level == 0)
-               hfsc_purge_queue(sch, parent);
+               qdisc_purge_queue(parent->qdisc);
        hfsc_adjust_levels(parent);
        sch_tree_unlock(sch);
 
@@ -1112,7 +1102,7 @@ hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
        list_del(&cl->siblings);
        hfsc_adjust_levels(cl->cl_parent);
 
-       hfsc_purge_queue(sch, cl);
+       qdisc_purge_queue(cl->qdisc);
        qdisc_class_hash_remove(&q->clhash, &cl->cl_common);
 
        sch_tree_unlock(sch);
@@ -1328,8 +1318,9 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 {
        struct hfsc_class *cl = (struct hfsc_class *)arg;
        struct tc_hfsc_stats xstats;
+       __u32 qlen;
 
-       cl->qstats.backlog = cl->qdisc->qstats.backlog;
+       qdisc_qstats_qlen_backlog(cl->qdisc, &qlen, &cl->qstats.backlog);
        xstats.level   = cl->level;
        xstats.period  = cl->cl_vtperiod;
        xstats.work    = cl->cl_total;
@@ -1337,7 +1328,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
+           gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
                return -1;
 
        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
index 30f9da7e1076368f2b0718d2bb0e1e3c5432998c..2f9883b196e8e6b10abd9b623b6285274a003ff6 100644 (file)
@@ -1127,10 +1127,9 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
        };
        __u32 qlen = 0;
 
-       if (!cl->level && cl->leaf.q) {
-               qlen = cl->leaf.q->q.qlen;
-               qs.backlog = cl->leaf.q->qstats.backlog;
-       }
+       if (!cl->level && cl->leaf.q)
+               qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
+
        cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
                                    INT_MIN, INT_MAX);
        cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
@@ -1270,13 +1269,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
        sch_tree_lock(sch);
 
-       if (!cl->level) {
-               unsigned int qlen = cl->leaf.q->q.qlen;
-               unsigned int backlog = cl->leaf.q->qstats.backlog;
-
-               qdisc_reset(cl->leaf.q);
-               qdisc_tree_reduce_backlog(cl->leaf.q, qlen, backlog);
-       }
+       if (!cl->level)
+               qdisc_purge_queue(cl->leaf.q);
 
        /* delete from hash and active; remainder in destroy_class */
        qdisc_class_hash_remove(&q->clhash, &cl->common);
@@ -1404,12 +1398,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                                          classid, NULL);
                sch_tree_lock(sch);
                if (parent && !parent->level) {
-                       unsigned int qlen = parent->leaf.q->q.qlen;
-                       unsigned int backlog = parent->leaf.q->qstats.backlog;
-
                        /* turn parent into inner node */
-                       qdisc_reset(parent->leaf.q);
-                       qdisc_tree_reduce_backlog(parent->leaf.q, qlen, backlog);
+                       qdisc_purge_queue(parent->leaf.q);
                        qdisc_put(parent->leaf.q);
                        if (parent->prio_activity)
                                htb_deactivate(q, parent);
index 203659bc3906419f6a00edca96561efb503d608d..3a3312467692c4f17bc03a78299322ac9a67250c 100644 (file)
@@ -249,7 +249,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 
        sch = dev_queue->qdisc_sleeping;
        if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
+           qdisc_qstats_copy(d, sch) < 0)
                return -1;
        return 0;
 }
index d364e63c396d78fe8866b9a8d7aa1ec9b281814e..ea0dc112b38dd4ac43d0fdc15f4742583c380991 100644 (file)
@@ -561,8 +561,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                sch = dev_queue->qdisc_sleeping;
                if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                          d, NULL, &sch->bstats) < 0 ||
-                   gnet_stats_copy_queue(d, NULL,
-                                         &sch->qstats, sch->q.qlen) < 0)
+                   qdisc_qstats_copy(d, sch) < 0)
                        return -1;
        }
        return 0;
index 7410ce4d03213d315696ec933722edbc6c542f2a..35b03ae08e0f1f8afbd10f6f4c1d6078e22a48d1 100644 (file)
@@ -201,9 +201,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
        for (i = q->bands; i < q->max_bands; i++) {
                if (q->queues[i] != &noop_qdisc) {
                        struct Qdisc *child = q->queues[i];
+
                        q->queues[i] = &noop_qdisc;
-                       qdisc_tree_reduce_backlog(child, child->q.qlen,
-                                                 child->qstats.backlog);
+                       qdisc_tree_flush_backlog(child);
                        qdisc_put(child);
                }
        }
@@ -225,9 +225,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
                                        qdisc_hash_add(child, true);
 
                                if (old != &noop_qdisc) {
-                                       qdisc_tree_reduce_backlog(old,
-                                                                 old->q.qlen,
-                                                                 old->qstats.backlog);
+                                       qdisc_tree_flush_backlog(old);
                                        qdisc_put(old);
                                }
                                sch_tree_unlock(sch);
@@ -344,7 +342,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
        cl_q = q->queues[cl - 1];
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl_q->bstats) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
+           qdisc_qstats_copy(d, cl_q) < 0)
                return -1;
 
        return 0;
index 847141cd900f1933f0b48684085c747f06c092c1..d519b21535b36b1f163460593573cb018cd1a904 100644 (file)
@@ -216,12 +216,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
        q->bands = qopt->bands;
        memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
 
-       for (i = q->bands; i < oldbands; i++) {
-               struct Qdisc *child = q->queues[i];
-
-               qdisc_tree_reduce_backlog(child, child->q.qlen,
-                                         child->qstats.backlog);
-       }
+       for (i = q->bands; i < oldbands; i++)
+               qdisc_tree_flush_backlog(q->queues[i]);
 
        for (i = oldbands; i < q->bands; i++) {
                q->queues[i] = queues[i];
@@ -365,7 +361,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
        cl_q = q->queues[cl - 1];
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl_q->bstats) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
+           qdisc_qstats_copy(d, cl_q) < 0)
                return -1;
 
        return 0;
index 29f5c4a2468829457ddf734aa1e7711ebfe4bcc8..1589364b54da11dc241212dee190dad741d9d9bc 100644 (file)
@@ -217,15 +217,6 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
        return container_of(clc, struct qfq_class, common);
 }
 
-static void qfq_purge_queue(struct qfq_class *cl)
-{
-       unsigned int len = cl->qdisc->q.qlen;
-       unsigned int backlog = cl->qdisc->qstats.backlog;
-
-       qdisc_reset(cl->qdisc);
-       qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
        [TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
        [TCA_QFQ_LMAX] = { .type = NLA_U32 },
@@ -551,7 +542,7 @@ static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
 
        sch_tree_lock(sch);
 
-       qfq_purge_queue(cl);
+       qdisc_purge_queue(cl->qdisc);
        qdisc_class_hash_remove(&q->clhash, &cl->common);
 
        sch_tree_unlock(sch);
@@ -655,8 +646,7 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-           gnet_stats_copy_queue(d, NULL,
-                                 &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
+           qdisc_qstats_copy(d, cl->qdisc) < 0)
                return -1;
 
        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
index 9df9942340eaaa30ed38fc3345649f287a373bee..4e8c0abf619459f396b91fc587271b7938e48330 100644 (file)
@@ -233,8 +233,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
        q->flags = ctl->flags;
        q->limit = ctl->limit;
        if (child) {
-               qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-                                         q->qdisc->qstats.backlog);
+               qdisc_tree_flush_backlog(q->qdisc);
                old_child = q->qdisc;
                q->qdisc = child;
        }
index bab506b01a32950d2ac07ff04815424705c62503..2419fdb759667a5c124f2018a310aabe9318b257 100644 (file)
@@ -521,8 +521,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
                qdisc_hash_add(child, true);
        sch_tree_lock(sch);
 
-       qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-                                 q->qdisc->qstats.backlog);
+       qdisc_tree_flush_backlog(q->qdisc);
        qdisc_put(q->qdisc);
        q->qdisc = child;
 
index 206e4dbed12f0e08a2c8782fedac1247995001bb..c7041999eb5d348e7520dab451205fda7a72d22b 100644 (file)
@@ -895,7 +895,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 
        sch = dev_queue->qdisc_sleeping;
        if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
-           gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
+           qdisc_qstats_copy(d, sch) < 0)
                return -1;
        return 0;
 }
index 7f272a9070c5753e61dd140eca77afe4d17d6692..f71578dbb9e39292329e06d98535c764043acd55 100644 (file)
@@ -391,8 +391,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
 
        sch_tree_lock(sch);
        if (child) {
-               qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-                                         q->qdisc->qstats.backlog);
+               qdisc_tree_flush_backlog(q->qdisc);
                qdisc_put(q->qdisc);
                q->qdisc = child;
        }
index 6abc8b274270730e482730bbc3ef735a7ffd2e52..951afdeea5e92c7cab48f53482d307c3f9893d89 100644 (file)
@@ -600,6 +600,7 @@ out:
 static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
 {
        /* No address mapping for V4 sockets */
+       memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
        return sizeof(struct sockaddr_in);
 }
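
The memset added above matters because this sockaddr is later copied back to user space: struct sockaddr_in carries trailing padding, and any bytes not explicitly written would leak kernel memory. Illustrative layout, simplified from the uapi definition:

	struct sockaddr_in {
		sa_family_t	sin_family;	/* AF_INET */
		__be16		sin_port;
		struct in_addr	sin_addr;
		unsigned char	sin_zero[8];	/* padding: zero before copying out */
	};
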
 
index 1d143bc3f73de924766c7a2dec6134a99bf1bc38..4aa03588f87b981f7f495c53f4c486651688dcaa 100644 (file)
@@ -1112,32 +1112,6 @@ static void sctp_cmd_send_msg(struct sctp_association *asoc,
 }
 
 
-/* Sent the next ASCONF packet currently stored in the association.
- * This happens after the ASCONF_ACK was succeffully processed.
- */
-static void sctp_cmd_send_asconf(struct sctp_association *asoc)
-{
-       struct net *net = sock_net(asoc->base.sk);
-
-       /* Send the next asconf chunk from the addip chunk
-        * queue.
-        */
-       if (!list_empty(&asoc->addip_chunk_list)) {
-               struct list_head *entry = asoc->addip_chunk_list.next;
-               struct sctp_chunk *asconf = list_entry(entry,
-                                               struct sctp_chunk, list);
-               list_del_init(entry);
-
-               /* Hold the chunk until an ASCONF_ACK is received. */
-               sctp_chunk_hold(asconf);
-               if (sctp_primitive_ASCONF(net, asoc, asconf))
-                       sctp_chunk_free(asconf);
-               else
-                       asoc->addip_last_asconf = asconf;
-       }
-}
-
-
 /* These three macros allow us to pull the debugging code out of the
  * main flow of sctp_do_sm() to keep attention focused on the real
  * functionality there.
@@ -1783,9 +1757,6 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
                        }
                        sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
                        break;
-               case SCTP_CMD_SEND_NEXT_ASCONF:
-                       sctp_cmd_send_asconf(asoc);
-                       break;
                case SCTP_CMD_PURGE_ASCONF_QUEUE:
                        sctp_asconf_queue_teardown(asoc);
                        break;
index c9ae3404b1bb11572e34255cb3eae86ca1dd8131..713a669d205858a39d65af31396ab74abe138dc2 100644 (file)
@@ -3824,6 +3824,29 @@ enum sctp_disposition sctp_sf_do_asconf(struct net *net,
        return SCTP_DISPOSITION_CONSUME;
 }
 
+static enum sctp_disposition sctp_send_next_asconf(
+                                       struct net *net,
+                                       const struct sctp_endpoint *ep,
+                                       struct sctp_association *asoc,
+                                       const union sctp_subtype type,
+                                       struct sctp_cmd_seq *commands)
+{
+       struct sctp_chunk *asconf;
+       struct list_head *entry;
+
+       if (list_empty(&asoc->addip_chunk_list))
+               return SCTP_DISPOSITION_CONSUME;
+
+       entry = asoc->addip_chunk_list.next;
+       asconf = list_entry(entry, struct sctp_chunk, list);
+
+       list_del_init(entry);
+       sctp_chunk_hold(asconf);
+       asoc->addip_last_asconf = asconf;
+
+       return sctp_sf_do_prm_asconf(net, ep, asoc, type, asconf, commands);
+}
+
 /*
  * ADDIP Section 4.3 General rules for address manipulation
  * When building TLV parameters for the ASCONF Chunk that will add or
@@ -3915,14 +3938,10 @@ enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
                                SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
 
                if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
-                                            asconf_ack)) {
-                       /* Successfully processed ASCONF_ACK.  We can
-                        * release the next asconf if we have one.
-                        */
-                       sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
-                                       SCTP_NULL());
-                       return SCTP_DISPOSITION_CONSUME;
-               }
+                                            asconf_ack))
+                       return sctp_send_next_asconf(net, ep,
+                                       (struct sctp_association *)asoc,
+                                                       type, commands);
 
                abort = sctp_make_abort(asoc, asconf_ack,
                                        sizeof(struct sctp_errhdr));
index 6140471efd4b8cf851d238a8c80c22858346740e..4583fa914e62aedaf2ef29c5cf668f0caee4eade 100644 (file)
@@ -999,7 +999,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
        if (unlikely(addrs_size <= 0))
                return -EINVAL;
 
-       kaddrs = vmemdup_user(addrs, addrs_size);
+       kaddrs = memdup_user(addrs, addrs_size);
        if (unlikely(IS_ERR(kaddrs)))
                return PTR_ERR(kaddrs);
 
@@ -1007,7 +1007,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
        addr_buf = kaddrs;
        while (walk_size < addrs_size) {
                if (walk_size + sizeof(sa_family_t) > addrs_size) {
-                       kvfree(kaddrs);
+                       kfree(kaddrs);
                        return -EINVAL;
                }
 
@@ -1018,7 +1018,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
                 * causes the address buffer to overflow return EINVAL.
                 */
                if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
-                       kvfree(kaddrs);
+                       kfree(kaddrs);
                        return -EINVAL;
                }
                addrcnt++;
@@ -1054,7 +1054,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
        }
 
 out:
-       kvfree(kaddrs);
+       kfree(kaddrs);
 
        return err;
 }
@@ -1329,7 +1329,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
        if (unlikely(addrs_size <= 0))
                return -EINVAL;
 
-       kaddrs = vmemdup_user(addrs, addrs_size);
+       kaddrs = memdup_user(addrs, addrs_size);
        if (unlikely(IS_ERR(kaddrs)))
                return PTR_ERR(kaddrs);
 
@@ -1349,7 +1349,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
        err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
 
 out_free:
-       kvfree(kaddrs);
+       kfree(kaddrs);
 
        return err;
 }
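
With vmemdup_user() swapped for memdup_user(), the buffer is always kmalloc-backed, so every release site must use kfree() rather than kvfree(); that pairing is exactly what the surrounding hunks adjust. A minimal sketch of the pattern (function name illustrative):

	static int use_user_addrs(void __user *uaddrs, int addrs_size)
	{
		void *kaddrs = memdup_user(uaddrs, addrs_size);	/* kmalloc-backed copy */

		if (IS_ERR(kaddrs))
			return PTR_ERR(kaddrs);
		/* ... walk and validate the address list ... */
		kfree(kaddrs);					/* pairs with memdup_user() */
		return 0;
	}
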
@@ -2920,6 +2920,9 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk,
                return 0;
        }
 
+       if (sctp_style(sk, TCP))
+               params.sack_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (params.sack_assoc_id == SCTP_FUTURE_ASSOC ||
            params.sack_assoc_id == SCTP_ALL_ASSOC) {
                if (params.sack_delay) {
@@ -3024,6 +3027,9 @@ static int sctp_setsockopt_default_send_param(struct sock *sk,
                return 0;
        }
 
+       if (sctp_style(sk, TCP))
+               info.sinfo_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (info.sinfo_assoc_id == SCTP_FUTURE_ASSOC ||
            info.sinfo_assoc_id == SCTP_ALL_ASSOC) {
                sp->default_stream = info.sinfo_stream;
@@ -3081,6 +3087,9 @@ static int sctp_setsockopt_default_sndinfo(struct sock *sk,
                return 0;
        }
 
+       if (sctp_style(sk, TCP))
+               info.snd_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (info.snd_assoc_id == SCTP_FUTURE_ASSOC ||
            info.snd_assoc_id == SCTP_ALL_ASSOC) {
                sp->default_stream = info.snd_sid;
@@ -3531,6 +3540,9 @@ static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
                return 0;
        }
 
+       if (sctp_style(sk, TCP))
+               params.assoc_id = SCTP_FUTURE_ASSOC;
+
        if (params.assoc_id == SCTP_FUTURE_ASSOC ||
            params.assoc_id == SCTP_ALL_ASSOC)
                sp->default_rcv_context = params.assoc_value;
@@ -3670,6 +3682,9 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
                return 0;
        }
 
+       if (sctp_style(sk, TCP))
+               params.assoc_id = SCTP_FUTURE_ASSOC;
+
        if (params.assoc_id == SCTP_FUTURE_ASSOC ||
            params.assoc_id == SCTP_ALL_ASSOC)
                sp->max_burst = params.assoc_value;
@@ -3798,6 +3813,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
                goto out;
        }
 
+       if (sctp_style(sk, TCP))
+               authkey->sca_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (authkey->sca_assoc_id == SCTP_FUTURE_ASSOC ||
            authkey->sca_assoc_id == SCTP_ALL_ASSOC) {
                ret = sctp_auth_set_key(ep, asoc, authkey);
@@ -3853,6 +3871,9 @@ static int sctp_setsockopt_active_key(struct sock *sk,
        if (asoc)
                return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
 
+       if (sctp_style(sk, TCP))
+               val.scact_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
            val.scact_assoc_id == SCTP_ALL_ASSOC) {
                ret = sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
@@ -3904,6 +3925,9 @@ static int sctp_setsockopt_del_key(struct sock *sk,
        if (asoc)
                return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
 
+       if (sctp_style(sk, TCP))
+               val.scact_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
            val.scact_assoc_id == SCTP_ALL_ASSOC) {
                ret = sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
@@ -3954,6 +3978,9 @@ static int sctp_setsockopt_deactivate_key(struct sock *sk, char __user *optval,
        if (asoc)
                return sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);
 
+       if (sctp_style(sk, TCP))
+               val.scact_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
            val.scact_assoc_id == SCTP_ALL_ASSOC) {
                ret = sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);
@@ -4169,6 +4196,9 @@ static int sctp_setsockopt_default_prinfo(struct sock *sk,
                goto out;
        }
 
+       if (sctp_style(sk, TCP))
+               info.pr_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (info.pr_assoc_id == SCTP_FUTURE_ASSOC ||
            info.pr_assoc_id == SCTP_ALL_ASSOC) {
                SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy);
@@ -4251,6 +4281,9 @@ static int sctp_setsockopt_enable_strreset(struct sock *sk,
                goto out;
        }
 
+       if (sctp_style(sk, TCP))
+               params.assoc_id = SCTP_FUTURE_ASSOC;
+
        if (params.assoc_id == SCTP_FUTURE_ASSOC ||
            params.assoc_id == SCTP_ALL_ASSOC)
                ep->strreset_enable = params.assoc_value;
@@ -4376,6 +4409,9 @@ static int sctp_setsockopt_scheduler(struct sock *sk,
        if (asoc)
                return sctp_sched_set_sched(asoc, params.assoc_value);
 
+       if (sctp_style(sk, TCP))
+               params.assoc_id = SCTP_FUTURE_ASSOC;
+
        if (params.assoc_id == SCTP_FUTURE_ASSOC ||
            params.assoc_id == SCTP_ALL_ASSOC)
                sp->default_ss = params.assoc_value;
@@ -4541,6 +4577,9 @@ static int sctp_setsockopt_event(struct sock *sk, char __user *optval,
        if (asoc)
                return sctp_assoc_ulpevent_type_set(&param, asoc);
 
+       if (sctp_style(sk, TCP))
+               param.se_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (param.se_assoc_id == SCTP_FUTURE_ASSOC ||
            param.se_assoc_id == SCTP_ALL_ASSOC)
                sctp_ulpevent_type_set(&sp->subscribe,
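
The same two lines recur across these setsockopt handlers: a TCP-style socket has at most one association, so any caller-supplied id is coerced to SCTP_FUTURE_ASSOC before the FUTURE/ALL branches run. Read as a hypothetical helper (not part of the patch, purely illustrative):

	static sctp_assoc_t sctp_normalize_assoc_id(struct sock *sk, sctp_assoc_t id)
	{
		/* TCP-style sockets have a single association; treat every
		 * request as applying to future associations.
		 */
		if (sctp_style(sk, TCP))
			return SCTP_FUTURE_ASSOC;
		return id;
	}
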
@@ -4808,7 +4847,8 @@ static int sctp_connect(struct sock *sk, struct sockaddr *addr,
        }
 
        /* Validate addr_len before calling common connect/connectx routine. */
-       af = sctp_get_af_specific(addr->sa_family);
+       af = addr_len < offsetofend(struct sockaddr, sa_family) ? NULL :
+               sctp_get_af_specific(addr->sa_family);
        if (!af || addr_len < af->sockaddr_len) {
                err = -EINVAL;
        } else {
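
The guard above refuses to read addr->sa_family unless addr_len covers it. offsetofend() is the stock helper for that check; for reference, it is defined in include/linux/stddef.h essentially as:

	#define offsetofend(TYPE, MEMBER) \
		(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
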
@@ -9169,7 +9209,7 @@ static inline void sctp_copy_descendant(struct sock *sk_to,
 {
        int ancestor_size = sizeof(struct inet_sock) +
                            sizeof(struct sctp_sock) -
-                           offsetof(struct sctp_sock, auto_asconf_list);
+                           offsetof(struct sctp_sock, pd_lobby);
 
        if (sk_from->sk_family == PF_INET6)
                ancestor_size += sizeof(struct ipv6_pinfo);
@@ -9253,7 +9293,6 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
         * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
         * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
         */
-       skb_queue_head_init(&newsp->pd_lobby);
        atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);
 
        if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
index 77ef53596d18c5fd091b6888efbc8b35063087a8..6f869ef49b3226806ab7f7973821870d77618004 100644 (file)
@@ -167,10 +167,9 @@ static int smc_release(struct socket *sock)
 
        if (sk->sk_state == SMC_CLOSED) {
                if (smc->clcsock) {
-                       mutex_lock(&smc->clcsock_release_lock);
-                       sock_release(smc->clcsock);
-                       smc->clcsock = NULL;
-                       mutex_unlock(&smc->clcsock_release_lock);
+                       release_sock(sk);
+                       smc_clcsock_release(smc);
+                       lock_sock(sk);
                }
                if (!smc->use_fallback)
                        smc_conn_free(&smc->conn);
@@ -446,10 +445,19 @@ static void smc_link_save_peer_info(struct smc_link *link,
        link->peer_mtu = clc->qp_mtu;
 }
 
+static void smc_switch_to_fallback(struct smc_sock *smc)
+{
+       smc->use_fallback = true;
+       if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
+               smc->clcsock->file = smc->sk.sk_socket->file;
+               smc->clcsock->file->private_data = smc->clcsock;
+       }
+}
+
 /* fall back during connect */
 static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
 {
-       smc->use_fallback = true;
+       smc_switch_to_fallback(smc);
        smc->fallback_rsn = reason_code;
        smc_copy_sock_settings_to_clc(smc);
        if (smc->sk.sk_state == SMC_INIT)
@@ -775,10 +783,14 @@ static void smc_connect_work(struct work_struct *work)
                smc->sk.sk_err = -rc;
 
 out:
-       if (smc->sk.sk_err)
-               smc->sk.sk_state_change(&smc->sk);
-       else
-               smc->sk.sk_write_space(&smc->sk);
+       if (!sock_flag(&smc->sk, SOCK_DEAD)) {
+               if (smc->sk.sk_err) {
+                       smc->sk.sk_state_change(&smc->sk);
+               } else { /* allow polling before and after fallback decision */
+                       smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
+                       smc->sk.sk_write_space(&smc->sk);
+               }
+       }
        kfree(smc->connect_info);
        smc->connect_info = NULL;
        release_sock(&smc->sk);
@@ -872,11 +884,11 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
        if  (rc < 0)
                lsk->sk_err = -rc;
        if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
+               new_sk->sk_prot->unhash(new_sk);
                if (new_clcsock)
                        sock_release(new_clcsock);
                new_sk->sk_state = SMC_CLOSED;
                sock_set_flag(new_sk, SOCK_DEAD);
-               new_sk->sk_prot->unhash(new_sk);
                sock_put(new_sk); /* final */
                *new_smc = NULL;
                goto out;
@@ -927,16 +939,21 @@ struct sock *smc_accept_dequeue(struct sock *parent,
 
                smc_accept_unlink(new_sk);
                if (new_sk->sk_state == SMC_CLOSED) {
+                       new_sk->sk_prot->unhash(new_sk);
                        if (isk->clcsock) {
                                sock_release(isk->clcsock);
                                isk->clcsock = NULL;
                        }
-                       new_sk->sk_prot->unhash(new_sk);
                        sock_put(new_sk); /* final */
                        continue;
                }
-               if (new_sock)
+               if (new_sock) {
                        sock_graft(new_sk, new_sock);
+                       if (isk->use_fallback) {
+                               smc_sk(new_sk)->clcsock->file = new_sock->file;
+                               isk->clcsock->file->private_data = isk->clcsock;
+                       }
+               }
                return new_sk;
        }
        return NULL;
@@ -956,6 +973,7 @@ void smc_close_non_accepted(struct sock *sk)
                sock_set_flag(sk, SOCK_DEAD);
                sk->sk_shutdown |= SHUTDOWN_MASK;
        }
+       sk->sk_prot->unhash(sk);
        if (smc->clcsock) {
                struct socket *tcp;
 
@@ -971,7 +989,6 @@ void smc_close_non_accepted(struct sock *sk)
                        smc_conn_free(&smc->conn);
        }
        release_sock(sk);
-       sk->sk_prot->unhash(sk);
        sock_put(sk); /* final sock_put */
 }
 
@@ -1037,13 +1054,13 @@ static void smc_listen_out(struct smc_sock *new_smc)
        struct smc_sock *lsmc = new_smc->listen_smc;
        struct sock *newsmcsk = &new_smc->sk;
 
-       lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
        if (lsmc->sk.sk_state == SMC_LISTEN) {
+               lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
                smc_accept_enqueue(&lsmc->sk, newsmcsk);
+               release_sock(&lsmc->sk);
        } else { /* no longer listening */
                smc_close_non_accepted(newsmcsk);
        }
-       release_sock(&lsmc->sk);
 
        /* Wake up accept */
        lsmc->sk.sk_data_ready(&lsmc->sk);
@@ -1087,7 +1104,7 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
                return;
        }
        smc_conn_free(&new_smc->conn);
-       new_smc->use_fallback = true;
+       smc_switch_to_fallback(new_smc);
        new_smc->fallback_rsn = reason_code;
        if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
                if (smc_clc_send_decline(new_smc, reason_code) < 0) {
@@ -1237,6 +1254,9 @@ static void smc_listen_work(struct work_struct *work)
        int rc = 0;
        u8 ibport;
 
+       if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
+               return smc_listen_out_err(new_smc);
+
        if (new_smc->use_fallback) {
                smc_listen_out_connected(new_smc);
                return;
@@ -1244,7 +1264,7 @@ static void smc_listen_work(struct work_struct *work)
 
        /* check if peer is smc capable */
        if (!tcp_sk(newclcsock->sk)->syn_smc) {
-               new_smc->use_fallback = true;
+               smc_switch_to_fallback(new_smc);
                new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
                smc_listen_out_connected(new_smc);
                return;
@@ -1501,7 +1521,7 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 
        if (msg->msg_flags & MSG_FASTOPEN) {
                if (sk->sk_state == SMC_INIT) {
-                       smc->use_fallback = true;
+                       smc_switch_to_fallback(smc);
                        smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
                } else {
                        rc = -EINVAL;
@@ -1703,7 +1723,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
        case TCP_FASTOPEN_NO_COOKIE:
                /* option not supported by SMC */
                if (sk->sk_state == SMC_INIT) {
-                       smc->use_fallback = true;
+                       smc_switch_to_fallback(smc);
                        smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
                } else {
                        if (!smc->use_fallback)
index 2ad37e998509310f210f4e3654cc054487731e87..fc06720b53c1442a8dd3222ed7be482a8993ab92 100644 (file)
 
 #define SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME     (5 * HZ)
 
+/* release the clcsock that is assigned to the smc_sock */
+void smc_clcsock_release(struct smc_sock *smc)
+{
+       struct socket *tcp;
+
+       if (smc->listen_smc && current_work() != &smc->smc_listen_work)
+               cancel_work_sync(&smc->smc_listen_work);
+       mutex_lock(&smc->clcsock_release_lock);
+       if (smc->clcsock) {
+               tcp = smc->clcsock;
+               smc->clcsock = NULL;
+               sock_release(tcp);
+       }
+       mutex_unlock(&smc->clcsock_release_lock);
+}
+
 static void smc_close_cleanup_listen(struct sock *parent)
 {
        struct sock *sk;
@@ -321,6 +337,7 @@ static void smc_close_passive_work(struct work_struct *work)
                                                   close_work);
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
        struct smc_cdc_conn_state_flags *rxflags;
+       bool release_clcsock = false;
        struct sock *sk = &smc->sk;
        int old_state;
 
@@ -400,13 +417,13 @@ wakeup:
                if ((sk->sk_state == SMC_CLOSED) &&
                    (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
                        smc_conn_free(conn);
-                       if (smc->clcsock) {
-                               sock_release(smc->clcsock);
-                               smc->clcsock = NULL;
-                       }
+                       if (smc->clcsock)
+                               release_clcsock = true;
                }
        }
        release_sock(sk);
+       if (release_clcsock)
+               smc_clcsock_release(smc);
        sock_put(sk); /* sock_hold done by schedulers of close_work */
 }
 
index 19eb6a211c23cd12fad8f5077a26209bb05c3d33..e0e3b5df25d2474b8aadd2e7639d07e0c8c631ef 100644 (file)
@@ -23,5 +23,6 @@ void smc_close_wake_tx_prepared(struct smc_sock *smc);
 int smc_close_active(struct smc_sock *smc);
 int smc_close_shutdown_write(struct smc_sock *smc);
 void smc_close_init(struct smc_sock *smc);
+void smc_clcsock_release(struct smc_sock *smc);
 
 #endif /* SMC_CLOSE_H */
index 2fff79db1a59ce3d2908722941dd9355810c65a0..e89e918b88e09acaad980da8dc34e3d921fe69be 100644 (file)
@@ -289,6 +289,11 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
        INIT_LIST_HEAD(&smcd->vlan);
        smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)",
                                                 WQ_MEM_RECLAIM, name);
+       if (!smcd->event_wq) {
+               kfree(smcd->conn);
+               kfree(smcd);
+               return NULL;
+       }
        return smcd;
 }
 EXPORT_SYMBOL_GPL(smcd_alloc_dev);
index 8d2f6296279c96827e332153ff274f522a3cb689..0285c7f9e79b6edb6a288be8bb50092a55bc7cfb 100644 (file)
@@ -603,7 +603,8 @@ static int smc_pnet_flush(struct sk_buff *skb, struct genl_info *info)
 {
        struct net *net = genl_info_net(info);
 
-       return smc_pnet_remove_by_pnetid(net, NULL);
+       smc_pnet_remove_by_pnetid(net, NULL);
+       return 0;
 }
 
 /* SMC_PNETID generic netlink operation definition */
index 3c176a12fe48048613d89add95189bb47cd5e14e..8255f5bda0aa07dbeb78460d5bbf52a9f4510fd3 100644 (file)
@@ -384,6 +384,18 @@ static struct file_system_type sock_fs_type = {
  *     but we take care of internal coherence yet.
  */
 
+/**
+ *     sock_alloc_file - Bind a &socket to a &file
+ *     @sock: socket
+ *     @flags: file status flags
+ *     @dname: protocol name
+ *
+ *     Returns the &file bound to @sock, implicitly storing it
+ *     in sock->file. If @dname is %NULL, the name is set to "".
+ *     On failure the return is an ERR pointer (see linux/err.h).
+ *     This function uses GFP_KERNEL internally.
+ */
+
 struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
 {
        struct file *file;
@@ -424,6 +436,14 @@ static int sock_map_fd(struct socket *sock, int flags)
        return PTR_ERR(newfile);
 }
 
+/**
+ *     sock_from_file - Return the &socket bound to @file.
+ *     @file: file
+ *     @err: pointer to an error code return
+ *
+ *     On failure returns %NULL and assigns -ENOTSOCK to @err.
+ */
+
 struct socket *sock_from_file(struct file *file, int *err)
 {
        if (file->f_op == &socket_file_ops)
@@ -532,11 +552,11 @@ static const struct inode_operations sockfs_inode_ops = {
 };
 
 /**
- *     sock_alloc      -       allocate a socket
+ *     sock_alloc - allocate a socket
  *
  *     Allocate a new inode and socket object. The two are bound together
  *     and initialised. The socket is then returned. If we are out of inodes
- *     NULL is returned.
+ *     NULL is returned. This function uses GFP_KERNEL internally.
  */
 
 struct socket *sock_alloc(void)
@@ -561,7 +581,7 @@ struct socket *sock_alloc(void)
 EXPORT_SYMBOL(sock_alloc);
 
 /**
- *     sock_release    -       close a socket
+ *     sock_release - close a socket
  *     @sock: socket to close
  *
  *     The socket is released from the protocol stack if it has a release
@@ -617,6 +637,15 @@ void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags)
 }
 EXPORT_SYMBOL(__sock_tx_timestamp);
 
+/**
+ *     sock_sendmsg - send a message through @sock
+ *     @sock: socket
+ *     @msg: message to send
+ *
+ *     Sends @msg through @sock, passing through LSM.
+ *     Returns the number of bytes sent, or an error code.
+ */
+
 static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
 {
        int ret = sock->ops->sendmsg(sock, msg, msg_data_left(msg));
@@ -633,6 +662,18 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg)
 }
 EXPORT_SYMBOL(sock_sendmsg);
 
+/**
+ *     kernel_sendmsg - send a message through @sock (kernel-space)
+ *     @sock: socket
+ *     @msg: message header
+ *     @vec: kernel vec
+ *     @num: vec array length
+ *     @size: total message data size
+ *
+ *     Builds the message data with @vec and sends it through @sock.
+ *     Returns the number of bytes sent, or an error code.
+ */
+
 int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
                   struct kvec *vec, size_t num, size_t size)
 {
@@ -641,6 +682,19 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
 }
 EXPORT_SYMBOL(kernel_sendmsg);
 
+/**
+ *     kernel_sendmsg_locked - send a message through @sock (kernel-space)
+ *     @sk: sock
+ *     @msg: message header
+ *     @vec: output s/g array
+ *     @num: output s/g array length
+ *     @size: total message data size
+ *
+ *     Builds the message data with @vec and sends it through @sock.
+ *     Returns the number of bytes sent, or an error code.
+ *     Caller must hold @sk.
+ */
+
 int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
                          struct kvec *vec, size_t num, size_t size)
 {
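
A minimal usage sketch for kernel_sendmsg() as documented above ('sock', 'payload' and 'payload_len' are caller-supplied placeholders):

	static int send_one_buffer(struct socket *sock, void *payload, size_t payload_len)
	{
		struct kvec vec = { .iov_base = payload, .iov_len = payload_len };
		struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

		/* Returns bytes sent or a negative errno. */
		return kernel_sendmsg(sock, &msg, &vec, 1, payload_len);
	}
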
@@ -811,6 +865,16 @@ void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
 }
 EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);
 
+/**
+ *     sock_recvmsg - receive a message from @sock
+ *     @sock: socket
+ *     @msg: message to receive
+ *     @flags: message flags
+ *
+ *     Receives @msg from @sock, passing through LSM. Returns the total number
+ *     of bytes received, or an error.
+ */
+
 static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
                                     int flags)
 {
@@ -826,20 +890,21 @@ int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags)
 EXPORT_SYMBOL(sock_recvmsg);
 
 /**
- * kernel_recvmsg - Receive a message from a socket (kernel space)
- * @sock:       The socket to receive the message from
- * @msg:        Received message
- * @vec:        Input s/g array for message data
- * @num:        Size of input s/g array
- * @size:       Number of bytes to read
- * @flags:      Message flags (MSG_DONTWAIT, etc...)
+ *     kernel_recvmsg - Receive a message from a socket (kernel space)
+ *     @sock: The socket to receive the message from
+ *     @msg: Received message
+ *     @vec: Input s/g array for message data
+ *     @num: Size of input s/g array
+ *     @size: Number of bytes to read
+ *     @flags: Message flags (MSG_DONTWAIT, etc...)
  *
- * On return the msg structure contains the scatter/gather array passed in the
- * vec argument. The array is modified so that it consists of the unfilled
- * portion of the original array.
+ *     On return the msg structure contains the scatter/gather array passed in the
+ *     vec argument. The array is modified so that it consists of the unfilled
+ *     portion of the original array.
  *
- * The returned value is the total number of bytes received, or an error.
+ *     The returned value is the total number of bytes received, or an error.
  */
+
 int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
                   struct kvec *vec, size_t num, size_t size, int flags)
 {
@@ -1005,6 +1070,13 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
  *     what to do with it - that's up to the protocol still.
  */
 
+/**
+ *     get_net_ns - increment the refcount of the network namespace
+ *     @ns: common namespace (net)
+ *
+ *     Returns the net's common namespace.
+ */
+
 struct ns_common *get_net_ns(struct ns_common *ns)
 {
        return &get_net(container_of(ns, struct net, ns))->ns;
@@ -1099,6 +1171,19 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        return err;
 }
 
+/**
+ *     sock_create_lite - creates a socket
+ *     @family: protocol family (AF_INET, ...)
+ *     @type: communication type (SOCK_STREAM, ...)
+ *     @protocol: protocol (0, ...)
+ *     @res: new socket
+ *
+ *     Creates a new socket and assigns it to @res, passing through LSM.
+ *     The new socket's initialization is not complete; see kernel_accept().
+ *     Returns 0 or an error. On failure @res is set to %NULL.
+ *     This function internally uses GFP_KERNEL.
+ */
+
 int sock_create_lite(int family, int type, int protocol, struct socket **res)
 {
        int err;
@@ -1224,6 +1309,21 @@ call_kill:
 }
 EXPORT_SYMBOL(sock_wake_async);
 
+/**
+ *     __sock_create - creates a socket
+ *     @net: net namespace
+ *     @family: protocol family (AF_INET, ...)
+ *     @type: communication type (SOCK_STREAM, ...)
+ *     @protocol: protocol (0, ...)
+ *     @res: new socket
+ *     @kern: boolean for kernel space sockets
+ *
+ *     Creates a new socket and assigns it to @res, passing through LSM.
+ *     Returns 0 or an error. On failure @res is set to %NULL. @kern must
+ *     be set to true if the socket resides in kernel space.
+ *     This function internally uses GFP_KERNEL.
+ */
+
 int __sock_create(struct net *net, int family, int type, int protocol,
                         struct socket **res, int kern)
 {
@@ -1333,12 +1433,35 @@ out_release:
 }
 EXPORT_SYMBOL(__sock_create);
 
+/**
+ *     sock_create - creates a socket
+ *     @family: protocol family (AF_INET, ...)
+ *     @type: communication type (SOCK_STREAM, ...)
+ *     @protocol: protocol (0, ...)
+ *     @res: new socket
+ *
+ *     A wrapper around __sock_create().
+ *     Returns 0 or an error. This function internally uses GFP_KERNEL.
+ */
+
 int sock_create(int family, int type, int protocol, struct socket **res)
 {
        return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0);
 }
 EXPORT_SYMBOL(sock_create);
 
+/**
+ *     sock_create_kern - creates a socket (kernel space)
+ *     @net: net namespace
+ *     @family: protocol family (AF_INET, ...)
+ *     @type: communication type (SOCK_STREAM, ...)
+ *     @protocol: protocol (0, ...)
+ *     @res: new socket
+ *
+ *     A wrapper around __sock_create().
+ *     Returns 0 or an error. This function internally uses GFP_KERNEL.
+ */
+
 int sock_create_kern(struct net *net, int family, int type, int protocol, struct socket **res)
 {
        return __sock_create(net, family, type, protocol, res, 1);
@@ -3322,18 +3445,46 @@ static long compat_sock_ioctl(struct file *file, unsigned int cmd,
 }
 #endif
 
+/**
+ *     kernel_bind - bind an address to a socket (kernel space)
+ *     @sock: socket
+ *     @addr: address
+ *     @addrlen: length of address
+ *
+ *     Returns 0 or an error.
+ */
+
 int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
 {
        return sock->ops->bind(sock, addr, addrlen);
 }
 EXPORT_SYMBOL(kernel_bind);
 
+/**
+ *     kernel_listen - move socket to listening state (kernel space)
+ *     @sock: socket
+ *     @backlog: pending connections queue size
+ *
+ *     Returns 0 or an error.
+ */
+
 int kernel_listen(struct socket *sock, int backlog)
 {
        return sock->ops->listen(sock, backlog);
 }
 EXPORT_SYMBOL(kernel_listen);
 
+/**
+ *     kernel_accept - accept a connection (kernel space)
+ *     @sock: listening socket
+ *     @newsock: new connected socket
+ *     @flags: flags
+ *
+ *     @flags must be SOCK_CLOEXEC, SOCK_NONBLOCK or 0.
+ *     If it fails, @newsock is guaranteed to be %NULL.
+ *     Returns 0 or an error.
+ */
+
 int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
 {
        struct sock *sk = sock->sk;
@@ -3359,6 +3510,19 @@ done:
 }
 EXPORT_SYMBOL(kernel_accept);
 
+/**
+ *     kernel_connect - connect a socket (kernel space)
+ *     @sock: socket
+ *     @addr: address
+ *     @addrlen: address length
+ *     @flags: flags (O_NONBLOCK, ...)
+ *
+ *     For datagram sockets, @addr is the address to which datagrams are sent
+ *     by default, and the only address from which datagrams are received.
+ *     For stream sockets, attempts to connect to @addr.
+ *     Returns 0 or an error code.
+ */
+
 int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
                   int flags)
 {
@@ -3366,18 +3530,48 @@ int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
 }
 EXPORT_SYMBOL(kernel_connect);
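
Tying the documented kernel-space helpers together, a hedged lifecycle sketch (error paths trimmed, names illustrative):

	static int accept_one_connection(struct net *net, struct sockaddr *addr, int addrlen)
	{
		struct socket *lsock, *csock = NULL;
		int err;

		err = sock_create_kern(net, AF_INET, SOCK_STREAM, IPPROTO_TCP, &lsock);
		if (err)
			return err;
		err = kernel_bind(lsock, addr, addrlen);
		if (!err)
			err = kernel_listen(lsock, 8);
		if (!err)
			err = kernel_accept(lsock, &csock, 0);	/* blocks until a peer connects */
		if (!err) {
			kernel_sock_shutdown(csock, SHUT_RDWR);
			sock_release(csock);
		}
		sock_release(lsock);
		return err;
	}
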
 
+/**
+ *     kernel_getsockname - get the address to which the socket is bound (kernel space)
+ *     @sock: socket
+ *     @addr: address holder
+ *
+ *     Fills the @addr pointer with the address to which the socket is bound.
+ *     Returns 0 or an error code.
+ */
+
 int kernel_getsockname(struct socket *sock, struct sockaddr *addr)
 {
        return sock->ops->getname(sock, addr, 0);
 }
 EXPORT_SYMBOL(kernel_getsockname);
 
+/**
+ *     kernel_getpeername - get the address to which the socket is connected (kernel space)
+ *     @sock: socket
+ *     @addr: address holder
+ *
+ *     Fills the @addr pointer with the address to which the socket is connected.
+ *     Returns 0 or an error code.
+ */
+
 int kernel_getpeername(struct socket *sock, struct sockaddr *addr)
 {
        return sock->ops->getname(sock, addr, 1);
 }
 EXPORT_SYMBOL(kernel_getpeername);
 
+/**
+ *     kernel_getsockopt - get a socket option (kernel space)
+ *     @sock: socket
+ *     @level: API level (SOL_SOCKET, ...)
+ *     @optname: option tag
+ *     @optval: option value
+ *     @optlen: option length
+ *
+ *     Assigns the option length to @optlen.
+ *     Returns 0 or an error.
+ */
+
 int kernel_getsockopt(struct socket *sock, int level, int optname,
                        char *optval, int *optlen)
 {
@@ -3400,6 +3594,17 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
 }
 EXPORT_SYMBOL(kernel_getsockopt);
 
+/**
+ *     kernel_setsockopt - set a socket option (kernel space)
+ *     @sock: socket
+ *     @level: API level (SOL_SOCKET, ...)
+ *     @optname: option tag
+ *     @optval: option value
+ *     @optlen: option length
+ *
+ *     Returns 0 or an error.
+ */
+
 int kernel_setsockopt(struct socket *sock, int level, int optname,
                        char *optval, unsigned int optlen)
 {
@@ -3420,6 +3625,17 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
 }
 EXPORT_SYMBOL(kernel_setsockopt);
 
+/**
+ *     kernel_sendpage - send a &page through a socket (kernel space)
+ *     @sock: socket
+ *     @page: page
+ *     @offset: page offset
+ *     @size: total size in bytes
+ *     @flags: flags (MSG_DONTWAIT, ...)
+ *
+ *     Returns the total amount sent in bytes or an error.
+ */
+
 int kernel_sendpage(struct socket *sock, struct page *page, int offset,
                    size_t size, int flags)
 {
@@ -3430,6 +3646,18 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset,
 }
 EXPORT_SYMBOL(kernel_sendpage);
 
+/**
+ *     kernel_sendpage_locked - send a &page through the locked sock (kernel space)
+ *     @sk: sock
+ *     @page: page
+ *     @offset: page offset
+ *     @size: total size in bytes
+ *     @flags: flags (MSG_DONTWAIT, ...)
+ *
+ *     Returns the total amount sent in bytes or an error.
+ *     Caller must hold @sk.
+ */
+
 int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
                           size_t size, int flags)
 {
@@ -3443,17 +3671,30 @@ int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
 }
 EXPORT_SYMBOL(kernel_sendpage_locked);
 
+/**
+ *     kernel_sock_shutdown - shut down part of a full-duplex connection (kernel space)
+ *     @sock: socket
+ *     @how: connection part
+ *
+ *     Returns 0 or an error.
+ */
+
 int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
 {
        return sock->ops->shutdown(sock, how);
 }
 EXPORT_SYMBOL(kernel_sock_shutdown);
 
-/* This routine returns the IP overhead imposed by a socket i.e.
- * the length of the underlying IP header, depending on whether
- * this is an IPv4 or IPv6 socket and the length from IP options turned
- * on at the socket. Assumes that the caller has a lock on the socket.
+/**
+ *     kernel_sock_ip_overhead - returns the IP overhead imposed by a socket
+ *     @sk: socket
+ *
+ *     This routine returns the IP overhead imposed by a socket i.e.
+ *     the length of the underlying IP header, depending on whether
+ *     this is an IPv4 or IPv6 socket and the length from IP options turned
+ *     on at the socket. Assumes that the caller has a lock on the socket.
  */
+
 u32 kernel_sock_ip_overhead(struct sock *sk)
 {
        struct inet_sock *inet;
index da1a676860cad3c8a2a95acf11f0e908fe2bc255..fa6c977b4c41a4a0b8deeb99c3e5d0d03c55de2b 100644 (file)
@@ -140,13 +140,11 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
                        /* We are going to append to the frags_list of head.
                         * Need to unshare the frag_list.
                         */
-                       if (skb_has_frag_list(head)) {
-                               err = skb_unclone(head, GFP_ATOMIC);
-                               if (err) {
-                                       STRP_STATS_INCR(strp->stats.mem_fail);
-                                       desc->error = err;
-                                       return 0;
-                               }
+                       err = skb_unclone(head, GFP_ATOMIC);
+                       if (err) {
+                               STRP_STATS_INCR(strp->stats.mem_fail);
+                               desc->error = err;
+                               return 0;
                        }
 
                        if (unlikely(skb_shinfo(head)->frag_list)) {
@@ -550,6 +548,8 @@ EXPORT_SYMBOL_GPL(strp_check_rcv);
 static int __init strp_mod_init(void)
 {
        strp_wq = create_singlethread_workqueue("kstrp");
+       if (unlikely(!strp_wq))
+               return -ENOMEM;
 
        return 0;
 }
index 12bb23b8e0c50c80abd51c69a5c5a2ea6433a723..261131dfa1f1ba3900d85088a6cfde659bbe231a 100644 (file)
@@ -54,6 +54,7 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
        h->last_refresh = now;
 }
 
+static inline int cache_is_valid(struct cache_head *h);
 static void cache_fresh_locked(struct cache_head *head, time_t expiry,
                                struct cache_detail *detail);
 static void cache_fresh_unlocked(struct cache_head *head,
@@ -105,6 +106,8 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
                        if (cache_is_expired(detail, tmp)) {
                                hlist_del_init_rcu(&tmp->cache_list);
                                detail->entries --;
+                               if (cache_is_valid(tmp) == -EAGAIN)
+                                       set_bit(CACHE_NEGATIVE, &tmp->flags);
                                cache_fresh_locked(tmp, 0, detail);
                                freeme = tmp;
                                break;
index 228970e6e52ba8b407be724d055976dd67530f81..8ff11dc98d7f93fefeff6ecc53ff6d7815da47f8 100644 (file)
@@ -1540,7 +1540,6 @@ call_start(struct rpc_task *task)
        clnt->cl_stats->rpccnt++;
        task->tk_action = call_reserve;
        rpc_task_set_transport(task, clnt);
-       call_reserve(task);
 }
 
 /*
@@ -1554,9 +1553,6 @@ call_reserve(struct rpc_task *task)
        task->tk_status  = 0;
        task->tk_action  = call_reserveresult;
        xprt_reserve(task);
-       if (rpc_task_need_resched(task))
-               return;
-        call_reserveresult(task);
 }
 
 static void call_retry_reserve(struct rpc_task *task);
@@ -1579,7 +1575,6 @@ call_reserveresult(struct rpc_task *task)
        if (status >= 0) {
                if (task->tk_rqstp) {
                        task->tk_action = call_refresh;
-                       call_refresh(task);
                        return;
                }
 
@@ -1605,7 +1600,6 @@ call_reserveresult(struct rpc_task *task)
                /* fall through */
        case -EAGAIN:   /* woken up; retry */
                task->tk_action = call_retry_reserve;
-               call_retry_reserve(task);
                return;
        case -EIO:      /* probably a shutdown */
                break;
@@ -1628,9 +1622,6 @@ call_retry_reserve(struct rpc_task *task)
        task->tk_status  = 0;
        task->tk_action  = call_reserveresult;
        xprt_retry_reserve(task);
-       if (rpc_task_need_resched(task))
-               return;
-       call_reserveresult(task);
 }
 
 /*
@@ -1645,9 +1636,6 @@ call_refresh(struct rpc_task *task)
        task->tk_status = 0;
        task->tk_client->cl_stats->rpcauthrefresh++;
        rpcauth_refreshcred(task);
-       if (rpc_task_need_resched(task))
-               return;
-       call_refreshresult(task);
 }
 
 /*
@@ -1666,7 +1654,6 @@ call_refreshresult(struct rpc_task *task)
        case 0:
                if (rpcauth_uptodatecred(task)) {
                        task->tk_action = call_allocate;
-                       call_allocate(task);
                        return;
                }
                /* Use rate-limiting and a max number of retries if refresh
@@ -1685,7 +1672,6 @@ call_refreshresult(struct rpc_task *task)
                task->tk_cred_retry--;
                dprintk("RPC: %5u %s: retry refresh creds\n",
                                task->tk_pid, __func__);
-               call_refresh(task);
                return;
        }
        dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
@@ -1711,10 +1697,8 @@ call_allocate(struct rpc_task *task)
        task->tk_status = 0;
        task->tk_action = call_encode;
 
-       if (req->rq_buffer) {
-               call_encode(task);
+       if (req->rq_buffer)
                return;
-       }
 
        if (proc->p_proc != 0) {
                BUG_ON(proc->p_arglen == 0);
@@ -1740,12 +1724,8 @@ call_allocate(struct rpc_task *task)
 
        status = xprt->ops->buf_alloc(task);
        xprt_inject_disconnect(xprt);
-       if (status == 0) {
-               if (rpc_task_need_resched(task))
-                       return;
-               call_encode(task);
+       if (status == 0)
                return;
-       }
        if (status != -ENOMEM) {
                rpc_exit(task, status);
                return;
@@ -1828,8 +1808,12 @@ call_encode(struct rpc_task *task)
                xprt_request_enqueue_receive(task);
        xprt_request_enqueue_transmit(task);
 out:
-       task->tk_action = call_bind;
-       call_bind(task);
+       task->tk_action = call_transmit;
+       /* Check that the connection is OK */
+       if (!xprt_bound(task->tk_xprt))
+               task->tk_action = call_bind;
+       else if (!xprt_connected(task->tk_xprt))
+               task->tk_action = call_connect;
 }
 
 /*
@@ -1847,7 +1831,6 @@ rpc_task_handle_transmitted(struct rpc_task *task)
 {
        xprt_end_transmit(task);
        task->tk_action = call_transmit_status;
-       call_transmit_status(task);
 }
 
 /*
@@ -1865,7 +1848,6 @@ call_bind(struct rpc_task *task)
 
        if (xprt_bound(xprt)) {
                task->tk_action = call_connect;
-               call_connect(task);
                return;
        }
 
@@ -1896,7 +1878,6 @@ call_bind_status(struct rpc_task *task)
                dprint_status(task);
                task->tk_status = 0;
                task->tk_action = call_connect;
-               call_connect(task);
                return;
        }
 
@@ -1981,7 +1962,6 @@ call_connect(struct rpc_task *task)
 
        if (xprt_connected(xprt)) {
                task->tk_action = call_transmit;
-               call_transmit(task);
                return;
        }
 
@@ -2051,7 +2031,6 @@ call_connect_status(struct rpc_task *task)
        case 0:
                clnt->cl_stats->netreconn++;
                task->tk_action = call_transmit;
-               call_transmit(task);
                return;
        }
        rpc_exit(task, status);
@@ -2087,9 +2066,6 @@ call_transmit(struct rpc_task *task)
                xprt_transmit(task);
        }
        xprt_end_transmit(task);
-       if (rpc_task_need_resched(task))
-               return;
-       call_transmit_status(task);
 }
 
 /*
@@ -2105,11 +2081,8 @@ call_transmit_status(struct rpc_task *task)
         * test first.
         */
        if (rpc_task_transmitted(task)) {
-               if (task->tk_status == 0)
-                       xprt_request_wait_receive(task);
-               if (rpc_task_need_resched(task))
-                       return;
-               call_status(task);
+               task->tk_status = 0;
+               xprt_request_wait_receive(task);
                return;
        }
 
@@ -2170,7 +2143,6 @@ call_bc_encode(struct rpc_task *task)
 {
        xprt_request_enqueue_transmit(task);
        task->tk_action = call_bc_transmit;
-       call_bc_transmit(task);
 }
 
 /*
@@ -2195,6 +2167,9 @@ call_bc_transmit_status(struct rpc_task *task)
 {
        struct rpc_rqst *req = task->tk_rqstp;
 
+       if (rpc_task_transmitted(task))
+               task->tk_status = 0;
+
        dprint_status(task);
 
        switch (task->tk_status) {
@@ -2261,7 +2236,6 @@ call_status(struct rpc_task *task)
        status = task->tk_status;
        if (status >= 0) {
                task->tk_action = call_decode;
-               call_decode(task);
                return;
        }
 
@@ -2311,6 +2285,15 @@ out_exit:
        rpc_exit(task, status);
 }
 
+static bool
+rpc_check_connected(const struct rpc_rqst *req)
+{
+       /* No allocated request or transport? return true */
+       if (!req || !req->rq_xprt)
+               return true;
+       return xprt_connected(req->rq_xprt);
+}
+
 static void
 rpc_check_timeout(struct rpc_task *task)
 {
@@ -2322,10 +2305,11 @@ rpc_check_timeout(struct rpc_task *task)
        dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
        task->tk_timeouts++;
 
-       if (RPC_IS_SOFTCONN(task)) {
+       if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) {
                rpc_exit(task, -ETIMEDOUT);
                return;
        }
+
        if (RPC_IS_SOFT(task)) {
                if (clnt->cl_chatty) {
                        printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
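
The clnt.c hunks above all remove direct calls from one call_* state into the next, restoring the state-machine convention: a state assigns task->tk_action and returns, and the scheduler loop drives the next step once the task is runnable again. Loosely, the driving loop behaves like this sketch (modeled on __rpc_execute(); illustrative, not quoted):

	static void rpc_execute_sketch(struct rpc_task *task)
	{
		for (;;) {
			void (*do_action)(struct rpc_task *) = task->tk_action;

			if (!do_action)
				break;			/* task complete */
			do_action(task);		/* may set a new tk_action */
			if (RPC_IS_QUEUED(task))
				break;			/* task sleeps; a wakeup resumes the loop */
		}
	}
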
index 89a63391d4d442f6d390556aa8cf0b5a2a41357a..30cfc0efe6990aa5f693b56c26c257bd968f43e5 100644 (file)
@@ -90,7 +90,7 @@ static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
        /* Flush Receives, then wait for deferred Reply work
         * to complete.
         */
-       ib_drain_qp(ia->ri_id->qp);
+       ib_drain_rq(ia->ri_id->qp);
        drain_workqueue(buf->rb_completion_wq);
 
        /* Deferred Reply processing might have scheduled
index 9359539907bafb7ca1c13ad51485756ac89ce5d0..732d4b57411a2562ad8dc4ee2633c8441f204ba0 100644 (file)
@@ -495,8 +495,8 @@ xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
                int flags, struct rpc_rqst *req)
 {
        struct xdr_buf *buf = &req->rq_private_buf;
-       size_t want, read;
-       ssize_t ret;
+       size_t want, uninitialized_var(read);
+       ssize_t uninitialized_var(ret);
 
        xs_read_header(transport, buf);
 
index 06fee142f09fbea05a8b27bb240a4f3d3480b5b2..63f39201e41e699104d838f206d43f7f24806d3a 100644 (file)
@@ -919,6 +919,9 @@ int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb)
 {
        struct nlattr *group = nla_nest_start(skb, TIPC_NLA_SOCK_GROUP);
 
+       if (!group)
+               return -EMSGSIZE;
+
        if (nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_ID,
                        grp->type) ||
            nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_INSTANCE,
index 341ecd796aa473d35e770d4dfbf413ee3bcdc1cf..131aa2f0fd27c46e14f024b317dd65c786b0bea4 100644 (file)
@@ -869,6 +869,8 @@ void tipc_link_reset(struct tipc_link *l)
        __skb_queue_head_init(&list);
 
        l->in_session = false;
+       /* Force re-synch of peer session number before establishing */
+       l->peer_session--;
        l->session++;
        l->mtu = l->advertised_mtu;
 
index bff241f0352501aba8605622df16f2c85044c09b..89993afe0fbd38713dd3d0499cc79e6c3e159b4d 100644 (file)
@@ -909,7 +909,8 @@ static int tipc_nl_service_list(struct net *net, struct tipc_nl_msg *msg,
        for (; i < TIPC_NAMETBL_SIZE; i++) {
                head = &tn->nametbl->services[i];
 
-               if (*last_type) {
+               if (*last_type ||
+                   (!i && *last_key && (*last_lower == *last_key))) {
                        service = tipc_service_find(net, *last_type);
                        if (!service)
                                return -EPIPE;
index f076edb74338247f0bad99cfaa5d23e5b14730ab..7ce1e86b024f09cb7345840d4a4d8e3949c5107d 100644 (file)
@@ -163,12 +163,9 @@ void tipc_sched_net_finalize(struct net *net, u32 addr)
 
 void tipc_net_stop(struct net *net)
 {
-       u32 self = tipc_own_addr(net);
-
-       if (!self)
+       if (!tipc_own_id(net))
                return;
 
-       tipc_nametbl_withdraw(net, TIPC_CFG_SRV, self, self, self);
        rtnl_lock();
        tipc_bearer_stop(net);
        tipc_node_stop(net);
index 4ad3586da8f028c0fb8244382b343a1c7635f6cb..340a6e7c43a7d39c596de4f1b0045e4200edc2a3 100644 (file)
@@ -267,8 +267,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
        if (msg->rep_type)
                tipc_tlv_init(msg->rep, msg->rep_type);
 
-       if (cmd->header)
-               (*cmd->header)(msg);
+       if (cmd->header) {
+               err = (*cmd->header)(msg);
+               if (err) {
+                       kfree_skb(msg->rep);
+                       msg->rep = NULL;
+                       return err;
+               }
+       }
 
        arg = nlmsg_new(0, GFP_KERNEL);
        if (!arg) {
@@ -397,7 +403,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
        if (!bearer)
                return -EMSGSIZE;
 
-       len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+       len = TLV_GET_DATA_LEN(msg->req);
+       len -= offsetof(struct tipc_bearer_config, name);
+       if (len <= 0)
+               return -EINVAL;
+
+       len = min_t(int, len, TIPC_MAX_BEARER_NAME);
        if (!string_is_valid(b->name, len))
                return -EINVAL;
 
@@ -766,7 +777,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
 
        lc = (struct tipc_link_config *)TLV_DATA(msg->req);
 
-       len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+       len = TLV_GET_DATA_LEN(msg->req);
+       len -= offsetof(struct tipc_link_config, name);
+       if (len <= 0)
+               return -EINVAL;
+
+       len = min_t(int, len, TIPC_MAX_LINK_NAME);
        if (!string_is_valid(lc->name, len))
                return -EINVAL;
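
Both compat hunks apply the same fix: the TLV payload length must first cover the fixed part of the structure before the remainder can bound the name string, otherwise a short TLV lets string_is_valid() scan past the buffer. The shared pattern, as a sketch (helper name illustrative):

	static int compat_name_len(struct tlv_desc *req, size_t name_off, int name_max)
	{
		int len = TLV_GET_DATA_LEN(req) - (int)name_off;

		if (len <= 0)
			return -EINVAL;			/* TLV too short to hold a name */
		return min_t(int, len, name_max);
	}
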
 
index 2dc4919ab23cace02749ddb9b4838c2b64c09152..dd3b6dc17662fc42eb0b567501c6b9e8bee67031 100644 (file)
@@ -817,10 +817,10 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
 static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
 {
        struct tipc_link_entry *le = &n->links[bearer_id];
+       struct tipc_media_addr *maddr = NULL;
        struct tipc_link *l = le->link;
-       struct tipc_media_addr *maddr;
-       struct sk_buff_head xmitq;
        int old_bearer_id = bearer_id;
+       struct sk_buff_head xmitq;
 
        if (!l)
                return;
@@ -844,7 +844,8 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
        tipc_node_write_unlock(n);
        if (delete)
                tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
-       tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
+       if (!skb_queue_empty(&xmitq))
+               tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
        tipc_sk_rcv(n->net, &le->inputq);
 }
 
index 3274ef625dba1b3417405d8537b4b30e919d44d1..b542f14ed444bfcedac61ef2d1eda45d1af1add2 100644 (file)
@@ -2349,6 +2349,16 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
        return 0;
 }
 
+static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
+{
+       if (addr->family != AF_TIPC)
+               return false;
+       if (addr->addrtype == TIPC_SERVICE_RANGE)
+               return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
+       return (addr->addrtype == TIPC_SERVICE_ADDR ||
+               addr->addrtype == TIPC_SOCKET_ADDR);
+}
+
 /**
  * tipc_connect - establish a connection to another TIPC port
  * @sock: socket structure
@@ -2384,18 +2394,18 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest,
                if (!tipc_sk_type_connectionless(sk))
                        res = -EINVAL;
                goto exit;
-       } else if (dst->family != AF_TIPC) {
-               res = -EINVAL;
        }
-       if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME)
+       if (!tipc_sockaddr_is_sane(dst)) {
                res = -EINVAL;
-       if (res)
                goto exit;
-
+       }
        /* DGRAM/RDM connect(), just save the destaddr */
        if (tipc_sk_type_connectionless(sk)) {
                memcpy(&tsk->peer, dest, destlen);
                goto exit;
+       } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
+               res = -EINVAL;
+               goto exit;
        }
 
        previous = sk->sk_state;
@@ -3255,6 +3265,8 @@ static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
        peer_port = tsk_peer_port(tsk);
 
        nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
+       if (!nest)
+               return -EMSGSIZE;
 
        if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
                goto msg_full;
index 3481e4906bd6a4a3e1f27ec5d49106090c7ec7f1..9df82a573aa7768f583999e740022ce00295bbd4 100644 (file)
@@ -38,6 +38,8 @@
 
 #include <linux/sysctl.h>
 
+static int zero;
+static int one = 1;
 static struct ctl_table_header *tipc_ctl_hdr;
 
 static struct ctl_table tipc_table[] = {
@@ -46,14 +48,16 @@ static struct ctl_table tipc_table[] = {
                .data           = &sysctl_tipc_rmem,
                .maxlen         = sizeof(sysctl_tipc_rmem),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &one,
        },
        {
                .procname       = "named_timeout",
                .data           = &sysctl_tipc_named_timeout,
                .maxlen         = sizeof(sysctl_tipc_named_timeout),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
        },
        {
                .procname       = "sk_filter",
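
Unlike a clamp, proc_dointvec_minmax() refuses out-of-range writes, so a negative named_timeout is now rejected instead of being stored. A compilable userspace model of that reject-don't-clamp behavior (the -EINVAL convention is assumed to match the kernel handler; NULL means no bound on that side):

#include <errno.h>
#include <stdio.h>

static int set_bounded_int(int *slot, int val, const int *min, const int *max)
{
        if ((min && val < *min) || (max && val > *max))
                return -EINVAL;         /* rejected, *slot untouched */
        *slot = val;
        return 0;
}

int main(void)
{
        static const int zero = 0;
        int named_timeout = 2000;

        /* a negative timeout is refused instead of silently accepted */
        printf("%d\n", set_bounded_int(&named_timeout, -1, &zero, NULL));
        printf("%d %d\n", set_bounded_int(&named_timeout, 500, &zero, NULL),
               named_timeout);
        return 0;
}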
index 4a708a4e8583b5db2022b80e80529e25e787e7bb..b45932d780040a35c099a8ead80ccea40e0910b9 100644 (file)
@@ -363,6 +363,7 @@ static int tipc_conn_rcv_sub(struct tipc_topsrv *srv,
        struct tipc_subscription *sub;
 
        if (tipc_sub_read(s, filter) & TIPC_SUB_CANCEL) {
+               s->filter &= __constant_ntohl(~TIPC_SUB_CANCEL);
                tipc_conn_delete_sub(con, s);
                return 0;
        }
index 135a7ee9db034149252f8df3a56f7834ff573eab..14dedb24fa7b6ff1e7fe99f9adbe114eeabf15c1 100644 (file)
@@ -52,8 +52,11 @@ static DEFINE_SPINLOCK(tls_device_lock);
 
 static void tls_device_free_ctx(struct tls_context *ctx)
 {
-       if (ctx->tx_conf == TLS_HW)
+       if (ctx->tx_conf == TLS_HW) {
                kfree(tls_offload_ctx_tx(ctx));
+               kfree(ctx->tx.rec_seq);
+               kfree(ctx->tx.iv);
+       }
 
        if (ctx->rx_conf == TLS_HW)
                kfree(tls_offload_ctx_rx(ctx));
@@ -216,6 +219,13 @@ void tls_device_sk_destruct(struct sock *sk)
 }
 EXPORT_SYMBOL(tls_device_sk_destruct);
 
+void tls_device_free_resources_tx(struct sock *sk)
+{
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+       tls_free_partial_record(sk, tls_ctx);
+}
+
 static void tls_append_frag(struct tls_record_info *record,
                            struct page_frag *pfrag,
                            int size)
@@ -587,7 +597,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
 {
        struct strp_msg *rxm = strp_msg(skb);
-       int err = 0, offset = rxm->offset, copy, nsg;
+       int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
        struct sk_buff *skb_iter, *unused;
        struct scatterlist sg[1];
        char *orig_buf, *buf;
@@ -618,25 +628,42 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
        else
                err = 0;
 
-       copy = min_t(int, skb_pagelen(skb) - offset,
-                    rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+       data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;
 
-       if (skb->decrypted)
-               skb_store_bits(skb, offset, buf, copy);
+       if (skb_pagelen(skb) > offset) {
+               copy = min_t(int, skb_pagelen(skb) - offset, data_len);
 
-       offset += copy;
-       buf += copy;
+               if (skb->decrypted)
+                       skb_store_bits(skb, offset, buf, copy);
+
+               offset += copy;
+               buf += copy;
+       }
 
+       pos = skb_pagelen(skb);
        skb_walk_frags(skb, skb_iter) {
-               copy = min_t(int, skb_iter->len,
-                            rxm->full_len - offset + rxm->offset -
-                            TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+               int frag_pos;
+
+               /* Practically all frags must belong to msg if reencrypt
+                * is needed with current strparser and coalescing logic,
+                * but strparser may "get optimized", so let's be safe.
+                */
+               if (pos + skb_iter->len <= offset)
+                       goto done_with_frag;
+               if (pos >= data_len + rxm->offset)
+                       break;
+
+               frag_pos = offset - pos;
+               copy = min_t(int, skb_iter->len - frag_pos,
+                            data_len + rxm->offset - offset);
 
                if (skb_iter->decrypted)
-                       skb_store_bits(skb_iter, offset, buf, copy);
+                       skb_store_bits(skb_iter, frag_pos, buf, copy);
 
                offset += copy;
                buf += copy;
+done_with_frag:
+               pos += skb_iter->len;
        }
 
 free_buf:
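
The rewritten walk above keeps two cursors: offset, the next skb byte still to store, and pos, where the current fragment starts; plaintext ends at data_len + rxm->offset. A compilable model of the same arithmetic, with made-up fragment sizes:

/* Userspace model of the fragment-walk arithmetic; all sizes are examples. */
#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
        int frag_len[] = { 10, 20, 30 };        /* assumed fragment sizes */
        int rxm_offset = 5, data_len = 40;      /* plaintext ends at byte 45 */
        int pagelen = 15;                       /* linear area of the skb */
        int offset = rxm_offset, pos, i, copy, frag_pos;

        if (pagelen > offset)                   /* linear part first */
                offset += min_int(pagelen - offset, data_len);

        pos = pagelen;
        for (i = 0; i < 3; i++) {
                if (pos + frag_len[i] <= offset)     /* frag wholly before data */
                        goto done_with_frag;
                if (pos >= data_len + rxm_offset)    /* past end of plaintext */
                        break;
                frag_pos = offset - pos;             /* start inside this frag */
                copy = min_int(frag_len[i] - frag_pos,
                               data_len + rxm_offset - offset);
                printf("frag %d: store %d bytes at frag offset %d\n",
                       i, copy, frag_pos);
                offset += copy;
done_with_frag:
                pos += frag_len[i];
        }
        return 0;
}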
@@ -894,7 +921,9 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
        goto release_netdev;
 
 free_sw_resources:
+       up_read(&device_offload_lock);
        tls_sw_free_resources_rx(sk);
+       down_read(&device_offload_lock);
 release_ctx:
        ctx->priv_ctx_rx = NULL;
 release_netdev:
@@ -929,8 +958,6 @@ void tls_device_offload_cleanup_rx(struct sock *sk)
        }
 out:
        up_read(&device_offload_lock);
-       kfree(tls_ctx->rx.rec_seq);
-       kfree(tls_ctx->rx.iv);
        tls_sw_release_resources_rx(sk);
 }
 
index 54c3a758f2a7d9bf32f9ab13c7dbe1389e54ecc3..c3a5fe624b4e3818aa3df07e2640f2af65fd3285 100644 (file)
@@ -194,18 +194,26 @@ static void update_chksum(struct sk_buff *skb, int headln)
 
 static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
 {
+       struct sock *sk = skb->sk;
+       int delta;
+
        skb_copy_header(nskb, skb);
 
        skb_put(nskb, skb->len);
        memcpy(nskb->data, skb->data, headln);
-       update_chksum(nskb, headln);
 
        nskb->destructor = skb->destructor;
-       nskb->sk = skb->sk;
+       nskb->sk = sk;
        skb->destructor = NULL;
        skb->sk = NULL;
-       refcount_add(nskb->truesize - skb->truesize,
-                    &nskb->sk->sk_wmem_alloc);
+
+       update_chksum(nskb, headln);
+
+       delta = nskb->truesize - skb->truesize;
+       if (likely(delta < 0))
+               WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
+       else if (delta)
+               refcount_add(delta, &sk->sk_wmem_alloc);
 }
 
 /* This function may be called after the user socket is already
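
refcount_add() takes an unsigned count, so the (typically negative) truesize delta above must be applied as an explicit subtraction; passing a negative int to the add path would wrap it to a huge unsigned value. A toy model with a plain counter and assumed sizes:

#include <stdio.h>

int main(void)
{
        unsigned int sk_wmem_alloc = 8192;      /* assumed starting charge */
        int nskb_truesize = 1500, skb_truesize = 1800;
        int delta = nskb_truesize - skb_truesize;

        if (delta < 0)                          /* the common case */
                sk_wmem_alloc -= (unsigned int)-delta;
        else if (delta)
                sk_wmem_alloc += (unsigned int)delta;

        printf("sk_wmem_alloc = %u\n", sk_wmem_alloc); /* 7892 */
        return 0;
}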
index df921a2904b9b5b96acab53e52fa66090a900660..478603f43964d557146ae141ba45d4b0cae538fd 100644 (file)
@@ -208,6 +208,26 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
        return tls_push_sg(sk, ctx, sg, offset, flags);
 }
 
+bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
+{
+       struct scatterlist *sg;
+
+       sg = ctx->partially_sent_record;
+       if (!sg)
+               return false;
+
+       while (1) {
+               put_page(sg_page(sg));
+               sk_mem_uncharge(sk, sg->length);
+
+               if (sg_is_last(sg))
+                       break;
+               sg++;
+       }
+       ctx->partially_sent_record = NULL;
+       return true;
+}
+
 static void tls_write_space(struct sock *sk)
 {
        struct tls_context *ctx = tls_get_ctx(sk);
@@ -267,13 +287,14 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
                kfree(ctx->tx.rec_seq);
                kfree(ctx->tx.iv);
                tls_sw_free_resources_tx(sk);
+#ifdef CONFIG_TLS_DEVICE
+       } else if (ctx->tx_conf == TLS_HW) {
+               tls_device_free_resources_tx(sk);
+#endif
        }
 
-       if (ctx->rx_conf == TLS_SW) {
-               kfree(ctx->rx.rec_seq);
-               kfree(ctx->rx.iv);
+       if (ctx->rx_conf == TLS_SW)
                tls_sw_free_resources_rx(sk);
-       }
 
 #ifdef CONFIG_TLS_DEVICE
        if (ctx->rx_conf == TLS_HW)
index 425351ac2a9b156aacf9234e566d7c5ba0dc5867..29d6af43dd249dd72c175ac7401a308ef6193c2c 100644 (file)
@@ -1484,6 +1484,8 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
 
                                return err;
                        }
+               } else {
+                       *zc = false;
                }
 
                rxm->full_len -= padding_length(ctx, tls_ctx, skb);
@@ -2050,20 +2052,7 @@ void tls_sw_free_resources_tx(struct sock *sk)
        /* Free up un-sent records in tx_list. First, free
         * the partially sent record if any at head of tx_list.
         */
-       if (tls_ctx->partially_sent_record) {
-               struct scatterlist *sg = tls_ctx->partially_sent_record;
-
-               while (1) {
-                       put_page(sg_page(sg));
-                       sk_mem_uncharge(sk, sg->length);
-
-                       if (sg_is_last(sg))
-                               break;
-                       sg++;
-               }
-
-               tls_ctx->partially_sent_record = NULL;
-
+       if (tls_free_partial_record(sk, tls_ctx)) {
                rec = list_first_entry(&ctx->tx_list,
                                       struct tls_rec, list);
                list_del(&rec->list);
@@ -2089,6 +2078,9 @@ void tls_sw_release_resources_rx(struct sock *sk)
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
 
+       kfree(tls_ctx->rx.rec_seq);
+       kfree(tls_ctx->rx.iv);
+
        if (ctx->aead_recv) {
                kfree_skb(ctx->recv_pkt);
                ctx->recv_pkt = NULL;
index 25a9e3b5c1542a71fff0f4f2ab8166f977786f8c..47e30a58566c2817696655212a8da4c5fc00f00e 100644 (file)
@@ -13650,7 +13650,8 @@ static const struct genl_ops nl80211_ops[] = {
                .policy = nl80211_policy,
                .flags = GENL_UNS_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_DEAUTHENTICATE,
@@ -13701,7 +13702,8 @@ static const struct genl_ops nl80211_ops[] = {
                .policy = nl80211_policy,
                .flags = GENL_UNS_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS,
@@ -13709,7 +13711,8 @@ static const struct genl_ops nl80211_ops[] = {
                .policy = nl80211_policy,
                .flags = GENL_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_DISCONNECT,
@@ -13738,7 +13741,8 @@ static const struct genl_ops nl80211_ops[] = {
                .policy = nl80211_policy,
                .flags = GENL_UNS_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_DEL_PMKSA,
@@ -14090,7 +14094,8 @@ static const struct genl_ops nl80211_ops[] = {
                .policy = nl80211_policy,
                .flags = GENL_UNS_ADMIN_PERM,
                .internal_flags = NL80211_FLAG_NEED_WIPHY |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_SET_QOS_MAP,
@@ -14145,7 +14150,8 @@ static const struct genl_ops nl80211_ops[] = {
                .doit = nl80211_set_pmk,
                .policy = nl80211_policy,
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-                                 NL80211_FLAG_NEED_RTNL,
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_CLEAR_SKB,
        },
        {
                .cmd = NL80211_CMD_DEL_PMK,
index 2f1bf91eb2265a26bcebeeb3589735e77a3a9daa..a6fd5ce199da197aeaffc6b71b75703fe811974b 100644 (file)
@@ -1309,6 +1309,16 @@ reg_intersect_dfs_region(const enum nl80211_dfs_regions dfs_region1,
        return dfs_region1;
 }
 
+static void reg_wmm_rules_intersect(const struct ieee80211_wmm_ac *wmm_ac1,
+                                   const struct ieee80211_wmm_ac *wmm_ac2,
+                                   struct ieee80211_wmm_ac *intersect)
+{
+       intersect->cw_min = max_t(u16, wmm_ac1->cw_min, wmm_ac2->cw_min);
+       intersect->cw_max = max_t(u16, wmm_ac1->cw_max, wmm_ac2->cw_max);
+       intersect->cot = min_t(u16, wmm_ac1->cot, wmm_ac2->cot);
+       intersect->aifsn = max_t(u8, wmm_ac1->aifsn, wmm_ac2->aifsn);
+}
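
The intersection takes the larger cw_min, cw_max and aifsn and the smaller channel-occupancy time (cot) from the two rule sets. A compilable userspace model of reg_wmm_rules_intersect() with example numbers:

#include <stdio.h>

struct wmm_ac { unsigned short cw_min, cw_max, cot; unsigned char aifsn; };

static struct wmm_ac intersect(struct wmm_ac a, struct wmm_ac b)
{
        struct wmm_ac r;

        r.cw_min = a.cw_min > b.cw_min ? a.cw_min : b.cw_min;
        r.cw_max = a.cw_max > b.cw_max ? a.cw_max : b.cw_max;
        r.cot    = a.cot    < b.cot    ? a.cot    : b.cot;
        r.aifsn  = a.aifsn  > b.aifsn  ? a.aifsn  : b.aifsn;
        return r;
}

int main(void)
{
        struct wmm_ac x = { 15, 1023, 2000, 2 }, y = { 31, 511, 1500, 3 };
        struct wmm_ac r = intersect(x, y);

        printf("cw_min=%u cw_max=%u cot=%u aifsn=%u\n",
               r.cw_min, r.cw_max, r.cot, r.aifsn); /* 31 1023 1500 3 */
        return 0;
}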
+
 /*
  * Helper for regdom_intersect(), this does the real
  * mathematical intersection fun
@@ -1323,6 +1333,8 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
        struct ieee80211_freq_range *freq_range;
        const struct ieee80211_power_rule *power_rule1, *power_rule2;
        struct ieee80211_power_rule *power_rule;
+       const struct ieee80211_wmm_rule *wmm_rule1, *wmm_rule2;
+       struct ieee80211_wmm_rule *wmm_rule;
        u32 freq_diff, max_bandwidth1, max_bandwidth2;
 
        freq_range1 = &rule1->freq_range;
@@ -1333,6 +1345,10 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
        power_rule2 = &rule2->power_rule;
        power_rule = &intersected_rule->power_rule;
 
+       wmm_rule1 = &rule1->wmm_rule;
+       wmm_rule2 = &rule2->wmm_rule;
+       wmm_rule = &intersected_rule->wmm_rule;
+
        freq_range->start_freq_khz = max(freq_range1->start_freq_khz,
                                         freq_range2->start_freq_khz);
        freq_range->end_freq_khz = min(freq_range1->end_freq_khz,
@@ -1376,6 +1392,29 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
        intersected_rule->dfs_cac_ms = max(rule1->dfs_cac_ms,
                                           rule2->dfs_cac_ms);
 
+       if (rule1->has_wmm && rule2->has_wmm) {
+               u8 ac;
+
+               for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+                       reg_wmm_rules_intersect(&wmm_rule1->client[ac],
+                                               &wmm_rule2->client[ac],
+                                               &wmm_rule->client[ac]);
+                       reg_wmm_rules_intersect(&wmm_rule1->ap[ac],
+                                               &wmm_rule2->ap[ac],
+                                               &wmm_rule->ap[ac]);
+               }
+
+               intersected_rule->has_wmm = true;
+       } else if (rule1->has_wmm) {
+               *wmm_rule = *wmm_rule1;
+               intersected_rule->has_wmm = true;
+       } else if (rule2->has_wmm) {
+               *wmm_rule = *wmm_rule2;
+               intersected_rule->has_wmm = true;
+       } else {
+               intersected_rule->has_wmm = false;
+       }
+
        if (!is_valid_reg_rule(intersected_rule))
                return -EINVAL;
 
@@ -3739,10 +3778,9 @@ void wiphy_regulatory_register(struct wiphy *wiphy)
                /*
                 * The last request may have been received before this
                 * registration call. Call the driver notifier if
-                * initiator is USER and user type is CELL_BASE.
+                * initiator is USER.
                 */
-               if (lr->initiator == NL80211_REGDOM_SET_BY_USER &&
-                   lr->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE)
+               if (lr->initiator == NL80211_REGDOM_SET_BY_USER)
                        reg_call_notifier(wiphy, lr);
        }
 
index 287518c6caa40204525993d8b2477e269324f378..04d888628f29dcca952d38d48b785d5e2c56dfef 100644 (file)
@@ -190,10 +190,9 @@ static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
        /* copy subelement as we need to change its content to
         * mark an ie after it is processed.
         */
-       sub_copy = kmalloc(subie_len, gfp);
+       sub_copy = kmemdup(subelement, subie_len, gfp);
        if (!sub_copy)
                return 0;
-       memcpy(sub_copy, subelement, subie_len);
 
        pos = &new_ie[0];
 
index e4b8db5e81ec710db0ee8e779046e04263441e08..75899b62bdc9ed2116a1420a6035c904307f4838 100644 (file)
@@ -1220,9 +1220,11 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
        else if (rate->bw == RATE_INFO_BW_HE_RU &&
                 rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_26)
                result = rates_26[rate->he_gi];
-       else if (WARN(1, "invalid HE MCS: bw:%d, ru:%d\n",
-                     rate->bw, rate->he_ru_alloc))
+       else {
+               WARN(1, "invalid HE MCS: bw:%d, ru:%d\n",
+                    rate->bw, rate->he_ru_alloc);
                return 0;
+       }
 
        /* now scale to the appropriate MCS */
        tmp = result;
index 77520eacee8f18da45781ea70cc82ee16e3de94f..989e52386c358a34a566660933002827ba165068 100644 (file)
@@ -193,9 +193,6 @@ static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
 
 static void xdp_umem_release(struct xdp_umem *umem)
 {
-       struct task_struct *task;
-       struct mm_struct *mm;
-
        xdp_umem_clear_dev(umem);
 
        ida_simple_remove(&umem_ida, umem->id);
@@ -214,21 +211,10 @@ static void xdp_umem_release(struct xdp_umem *umem)
 
        xdp_umem_unpin_pages(umem);
 
-       task = get_pid_task(umem->pid, PIDTYPE_PID);
-       put_pid(umem->pid);
-       if (!task)
-               goto out;
-       mm = get_task_mm(task);
-       put_task_struct(task);
-       if (!mm)
-               goto out;
-
-       mmput(mm);
        kfree(umem->pages);
        umem->pages = NULL;
 
        xdp_umem_unaccount_pages(umem);
-out:
        kfree(umem);
 }
 
@@ -357,7 +343,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
        if (size_chk < 0)
                return -EINVAL;
 
-       umem->pid = get_task_pid(current, PIDTYPE_PID);
        umem->address = (unsigned long)addr;
        umem->chunk_mask = ~((u64)chunk_size - 1);
        umem->size = size;
@@ -373,7 +358,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 
        err = xdp_umem_account_pages(umem);
        if (err)
-               goto out;
+               return err;
 
        err = xdp_umem_pin_pages(umem);
        if (err)
@@ -392,8 +377,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 
 out_account:
        xdp_umem_unaccount_pages(umem);
-out:
-       put_pid(umem->pid);
        return err;
 }
 
index dbb3c1945b5c911b5933f60b284015a91524c832..85fec98676d34abdee2524d7f0ea0fe24b8b6a9d 100644 (file)
@@ -70,17 +70,28 @@ static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
        return NULL;
 }
 
-static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb)
+static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb,
+                                           unsigned short family)
 {
        struct xfrmi_net *xfrmn;
-       int ifindex;
        struct xfrm_if *xi;
+       int ifindex = 0;
 
        if (!secpath_exists(skb) || !skb->dev)
                return NULL;
 
+       switch (family) {
+       case AF_INET6:
+               ifindex = inet6_sdif(skb);
+               break;
+       case AF_INET:
+               ifindex = inet_sdif(skb);
+               break;
+       }
+       if (!ifindex)
+               ifindex = skb->dev->ifindex;
+
        xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
-       ifindex = skb->dev->ifindex;
 
        for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
                if (ifindex == xi->dev->ifindex &&
index 8d1a898d0ba562a25e8d42b1692d62ba766b7353..a6b58df7a70f614084f38f3591592650db5829c7 100644 (file)
@@ -3313,7 +3313,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
        ifcb = xfrm_if_get_cb();
 
        if (ifcb) {
-               xi = ifcb->decode_session(skb);
+               xi = ifcb->decode_session(skb, family);
                if (xi) {
                        if_id = xi->p.if_id;
                        net = xi->net;
index 586b4d656abde86eb2111577cabbbe683a32697b..c62f712fdaf71d51bf605e4412ec0274d4a0f633 100644 (file)
@@ -2390,7 +2390,7 @@ void xfrm_state_fini(struct net *net)
 
        flush_work(&net->xfrm.state_hash_work);
        flush_work(&xfrm_state_gc_work);
-       xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
+       xfrm_state_flush(net, 0, false, true);
 
        WARN_ON(!list_empty(&net->xfrm.state_all));
 
index a131f9ff979e1b64015ade91942cf1ce88eee15c..6916931b1de1ce6ea3f3a419a0e84aa353142eae 100644 (file)
@@ -1424,7 +1424,7 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
        ret = verify_policy_dir(p->dir);
        if (ret)
                return ret;
-       if (p->index && ((p->index & XFRM_POLICY_MAX) != p->dir))
+       if (p->index && (xfrm_policy_id2dir(p->index) != p->dir))
                return -EINVAL;
 
        return 0;
@@ -1513,20 +1513,8 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
                        return -EINVAL;
                }
 
-               switch (ut[i].id.proto) {
-               case IPPROTO_AH:
-               case IPPROTO_ESP:
-               case IPPROTO_COMP:
-#if IS_ENABLED(CONFIG_IPV6)
-               case IPPROTO_ROUTING:
-               case IPPROTO_DSTOPTS:
-#endif
-               case IPSEC_PROTO_ANY:
-                       break;
-               default:
+               if (!xfrm_id_proto_valid(ut[i].id.proto))
                        return -EINVAL;
-               }
-
        }
 
        return 0;
index 2554a15ecf2b8796c41e593d97c8b55ee97a4620..0c5969fa795f8423bfe6bde5a0444f27b2ff9538 100644 (file)
@@ -199,11 +199,8 @@ sub_cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
        "$(if $(part-of-module),1,0)" "$(@)";
 recordmcount_source := $(srctree)/scripts/recordmcount.pl
 endif # BUILD_C_RECORDMCOUNT
-cmd_record_mcount =                                            \
-       if [ "$(findstring $(CC_FLAGS_FTRACE),$(_c_flags))" =   \
-            "$(CC_FLAGS_FTRACE)" ]; then                       \
-               $(sub_cmd_record_mcount)                        \
-       fi
+cmd_record_mcount = $(if $(findstring $(strip $(CC_FLAGS_FTRACE)),$(_c_flags)),        \
+       $(sub_cmd_record_mcount))
 endif # CC_USING_RECORD_MCOUNT
 endif # CONFIG_FTRACE_MCOUNT_RECORD
 
@@ -225,6 +222,9 @@ endif
 ifdef CONFIG_RETPOLINE
   objtool_args += --retpoline
 endif
+ifdef CONFIG_X86_SMAP
+  objtool_args += --uaccess
+endif
 
 # 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory
 # 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file
index 38b2b4818e8ebfbe9d19deb0aebef5fdbcc19d28..019771b845c5ff15727bcd23975c3ac2694ca80f 100644 (file)
@@ -3,7 +3,6 @@ ifdef CONFIG_UBSAN
       CFLAGS_UBSAN += $(call cc-option, -fsanitize=shift)
       CFLAGS_UBSAN += $(call cc-option, -fsanitize=integer-divide-by-zero)
       CFLAGS_UBSAN += $(call cc-option, -fsanitize=unreachable)
-      CFLAGS_UBSAN += $(call cc-option, -fsanitize=vla-bound)
       CFLAGS_UBSAN += $(call cc-option, -fsanitize=signed-integer-overflow)
       CFLAGS_UBSAN += $(call cc-option, -fsanitize=bounds)
       CFLAGS_UBSAN += $(call cc-option, -fsanitize=object-size)
index 27400b0cd732e2f37733e321dee5742c8cdf6721..000dc6437893baa133224c1d9922e97433c573f4 100644 (file)
@@ -13,7 +13,7 @@ gen-atomic-long.sh              asm-generic/atomic-long.h
 gen-atomic-fallback.sh          linux/atomic-fallback.h
 EOF
 while read script header; do
-       ${ATOMICDIR}/${script} ${ATOMICTBL} > ${LINUXDIR}/include/${header}
+       /bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} > ${LINUXDIR}/include/${header}
        HASH="$(sha1sum ${LINUXDIR}/include/${header})"
        HASH="${HASH%% *}"
        printf "// %s\n" "${HASH}" >> ${LINUXDIR}/include/${header}
index 5b756278df13e8aa7f5daf92914858afbfd24482..a09333fd7cef81053bf204dd03a4db07bbb833ef 100755 (executable)
@@ -5977,7 +5977,7 @@ sub process {
                                while ($fmt =~ /(\%[\*\d\.]*p(\w))/g) {
                                        $specifier = $1;
                                        $extension = $2;
-                                       if ($extension !~ /[SsBKRraEhMmIiUDdgVCbGNOx]/) {
+                                       if ($extension !~ /[SsBKRraEhMmIiUDdgVCbGNOxt]/) {
                                                $bad_specifier = $specifier;
                                                last;
                                        }
diff --git a/scripts/coccinelle/api/stream_open.cocci b/scripts/coccinelle/api/stream_open.cocci
new file mode 100644 (file)
index 0000000..350145d
--- /dev/null
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+// Author: Kirill Smelkov (kirr@nexedi.com)
+//
+// Search for stream-like files that use nonseekable_open and convert them
+// to stream_open. A stream-like file is one that does not use ppos in its
+// read and write. The rationale for the conversion is to avoid a deadlock
+// between read and write.
+
+virtual report
+virtual patch
+virtual explain  // explain decisions in the patch (SPFLAGS="-D explain")
+
+// stream-like reader & writer - ones that do not depend on f_pos.
+@ stream_reader @
+identifier readstream, ppos;
+identifier f, buf, len;
+type loff_t;
+@@
+  ssize_t readstream(struct file *f, char *buf, size_t len, loff_t *ppos)
+  {
+    ... when != ppos
+  }
+
+@ stream_writer @
+identifier writestream, ppos;
+identifier f, buf, len;
+type loff_t;
+@@
+  ssize_t writestream(struct file *f, const char *buf, size_t len, loff_t *ppos)
+  {
+    ... when != ppos
+  }
+
+
+// a function that blocks
+@ blocks @
+identifier block_f;
+identifier wait_event =~ "^wait_event_.*";
+@@
+  block_f(...) {
+    ... when exists
+    wait_event(...)
+    ... when exists
+  }
+
+// stream_reader that can block inside.
+//
+// XXX wait_* can be reached indirectly rather than from the current function
+// (e.g. func -> f -> g -> wait()); currently reader_blocks supports only the
+// direct and 1-level indirect cases.
+@ reader_blocks_direct @
+identifier stream_reader.readstream;
+identifier wait_event =~ "^wait_event_.*";
+@@
+  readstream(...)
+  {
+    ... when exists
+    wait_event(...)
+    ... when exists
+  }
+
+@ reader_blocks_1 @
+identifier stream_reader.readstream;
+identifier blocks.block_f;
+@@
+  readstream(...)
+  {
+    ... when exists
+    block_f(...)
+    ... when exists
+  }
+
+@ reader_blocks depends on reader_blocks_direct || reader_blocks_1 @
+identifier stream_reader.readstream;
+@@
+  readstream(...) {
+    ...
+  }
+
+
+// file_operations + whether they have _any_ .read, .write, .llseek ... at all.
+//
+// XXX add support for file_operations xxx[N] = ...    (sound/core/pcm_native.c)
+@ fops0 @
+identifier fops;
+@@
+  struct file_operations fops = {
+    ...
+  };
+
+@ has_read @
+identifier fops0.fops;
+identifier read_f;
+@@
+  struct file_operations fops = {
+    .read = read_f,
+  };
+
+@ has_read_iter @
+identifier fops0.fops;
+identifier read_iter_f;
+@@
+  struct file_operations fops = {
+    .read_iter = read_iter_f,
+  };
+
+@ has_write @
+identifier fops0.fops;
+identifier write_f;
+@@
+  struct file_operations fops = {
+    .write = write_f,
+  };
+
+@ has_write_iter @
+identifier fops0.fops;
+identifier write_iter_f;
+@@
+  struct file_operations fops = {
+    .write_iter = write_iter_f,
+  };
+
+@ has_llseek @
+identifier fops0.fops;
+identifier llseek_f;
+@@
+  struct file_operations fops = {
+    .llseek = llseek_f,
+  };
+
+@ has_no_llseek @
+identifier fops0.fops;
+@@
+  struct file_operations fops = {
+    .llseek = no_llseek,
+  };
+
+@ has_mmap @
+identifier fops0.fops;
+identifier mmap_f;
+@@
+  struct file_operations fops = {
+    .mmap = mmap_f,
+  };
+
+@ has_copy_file_range @
+identifier fops0.fops;
+identifier copy_file_range_f;
+@@
+  struct file_operations fops = {
+    .copy_file_range = copy_file_range_f,
+  };
+
+@ has_remap_file_range @
+identifier fops0.fops;
+identifier remap_file_range_f;
+@@
+  struct file_operations fops = {
+    .remap_file_range = remap_file_range_f,
+  };
+
+@ has_splice_read @
+identifier fops0.fops;
+identifier splice_read_f;
+@@
+  struct file_operations fops = {
+    .splice_read = splice_read_f,
+  };
+
+@ has_splice_write @
+identifier fops0.fops;
+identifier splice_write_f;
+@@
+  struct file_operations fops = {
+    .splice_write = splice_write_f,
+  };
+
+
+// file_operations that is a candidate for stream_open conversion - it does
+// not use mmap or other methods that assume @offset access to the file.
+//
+// XXX for simplicity require no .{read/write}_iter and no .splice_{read/write} for now.
+// XXX maybe_stream.fops cannot be used in other rules - it gives "bad rule maybe_stream or bad variable fops".
+@ maybe_stream depends on (!has_llseek || has_no_llseek) && !has_mmap && !has_copy_file_range && !has_remap_file_range && !has_read_iter && !has_write_iter && !has_splice_read && !has_splice_write @
+identifier fops0.fops;
+@@
+  struct file_operations fops = {
+  };
+
+
+// ---- conversions ----
+
+// XXX .open = nonseekable_open -> .open = stream_open
+// XXX .open = func -> openfunc -> nonseekable_open
+
+// read & write
+//
+// if both are used in the same file_operations together with an opener,
+// under those conditions we can use stream_open instead of nonseekable_open.
+@ fops_rw depends on maybe_stream @
+identifier fops0.fops, openfunc;
+identifier stream_reader.readstream;
+identifier stream_writer.writestream;
+@@
+  struct file_operations fops = {
+      .open  = openfunc,
+      .read  = readstream,
+      .write = writestream,
+  };
+
+@ report_rw depends on report @
+identifier fops_rw.openfunc;
+position p1;
+@@
+  openfunc(...) {
+    <...
+     nonseekable_open@p1
+    ...>
+  }
+
+@ script:python depends on report && reader_blocks @
+fops << fops0.fops;
+p << report_rw.p1;
+@@
+coccilib.report.print_report(p[0],
+  "ERROR: %s: .read() can deadlock .write(); change nonseekable_open -> stream_open to fix." % (fops,))
+
+@ script:python depends on report && !reader_blocks @
+fops << fops0.fops;
+p << report_rw.p1;
+@@
+coccilib.report.print_report(p[0],
+  "WARNING: %s: .read() and .write() have stream semantics; safe to change nonseekable_open -> stream_open." % (fops,))
+
+
+@ explain_rw_deadlocked depends on explain && reader_blocks @
+identifier fops_rw.openfunc;
+@@
+  openfunc(...) {
+    <...
+-    nonseekable_open
++    nonseekable_open /* read & write (was deadlock) */
+    ...>
+  }
+
+
+@ explain_rw_nodeadlock depends on explain && !reader_blocks @
+identifier fops_rw.openfunc;
+@@
+  openfunc(...) {
+    <...
+-    nonseekable_open
++    nonseekable_open /* read & write (no direct deadlock) */
+    ...>
+  }
+
+@ patch_rw depends on patch @
+identifier fops_rw.openfunc;
+@@
+  openfunc(...) {
+    <...
+-   nonseekable_open
++   stream_open
+    ...>
+  }
+
+
+// read, but not write
+@ fops_r depends on maybe_stream && !has_write @
+identifier fops0.fops, openfunc;
+identifier stream_reader.readstream;
+@@
+  struct file_operations fops = {
+      .open  = openfunc,
+      .read  = readstream,
+  };
+
+@ report_r depends on report @
+identifier fops_r.openfunc;
+position p1;
+@@
+  openfunc(...) {
+    <...
+    nonseekable_open@p1
+    ...>
+  }
+
+@ script:python depends on report @
+fops << fops0.fops;
+p << report_r.p1;
+@@
+coccilib.report.print_report(p[0],
+  "WARNING: %s: .read() has stream semantics; safe to change nonseekable_open -> stream_open." % (fops,))
+
+@ explain_r depends on explain @
+identifier fops_r.openfunc;
+@@
+  openfunc(...) {
+    <...
+-   nonseekable_open
++   nonseekable_open /* read only */
+    ...>
+  }
+
+@ patch_r depends on patch @
+identifier fops_r.openfunc;
+@@
+  openfunc(...) {
+    <...
+-   nonseekable_open
++   stream_open
+    ...>
+  }
+
+
+// write, but not read
+@ fops_w depends on maybe_stream && !has_read @
+identifier fops0.fops, openfunc;
+identifier stream_writer.writestream;
+@@
+  struct file_operations fops = {
+      .open  = openfunc,
+      .write = writestream,
+  };
+
+@ report_w depends on report @
+identifier fops_w.openfunc;
+position p1;
+@@
+  openfunc(...) {
+    <...
+    nonseekable_open@p1
+    ...>
+  }
+
+@ script:python depends on report @
+fops << fops0.fops;
+p << report_w.p1;
+@@
+coccilib.report.print_report(p[0],
+  "WARNING: %s: .write() has stream semantics; safe to change nonseekable_open -> stream_open." % (fops,))
+
+@ explain_w depends on explain @
+identifier fops_w.openfunc;
+@@
+  openfunc(...) {
+    <...
+-   nonseekable_open
++   nonseekable_open /* write only */
+    ...>
+  }
+
+@ patch_w depends on patch @
+identifier fops_w.openfunc;
+@@
+  openfunc(...) {
+    <...
+-   nonseekable_open
++   stream_open
+    ...>
+  }
+
+
+// no read, no write - don't change anything
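
In effect, the patch mode above produces the conversion sketched below. This is a kernel-style sketch with assumed names (the foo_* identifiers and foo_copy_next_record are hypothetical, not taken from any real driver):

/* Hypothetical driver fragment: a reader that never dereferences *ppos,
 * paired with an open that marks the file as a stream.
 */
static ssize_t foo_read(struct file *file, char __user *buf,
                        size_t len, loff_t *ppos)
{
        /* blocks until data arrives; the file position is meaningless here */
        return foo_copy_next_record(file->private_data, buf, len);
}

static int foo_open(struct inode *inode, struct file *file)
{
        return stream_open(inode, file);  /* was: nonseekable_open(inode, file) */
}

static const struct file_operations foo_fops = {
        .owner  = THIS_MODULE,
        .open   = foo_open,
        .read   = foo_read,
        .llseek = no_llseek,
};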
index 7395697e7f19a5f524d883b7079197434c2983ae..c9f071b0a0ab70b647bec3633571059934ece1bc 100644 (file)
@@ -32,6 +32,7 @@ if (id == NULL || ...) { ... return ...; }
 (    id
 |    (T2)dev_get_drvdata(&id->dev)
 |    (T3)platform_get_drvdata(id)
+|    &id->dev
 );
 | return@p2 ...;
 )
index 481cf301ccfc3abf2b68c8dcc8b59612ed2e9841..08470362199c7389009982ec41fed3b9860b89cc 100644 (file)
@@ -1,4 +1,4 @@
-/// Use ARRAY_SIZE instead of dividing sizeof array with sizeof an element
+/// Correct the size argument to alloc functions
 ///
 //# This makes an effort to find cases where the argument to sizeof is wrong
 //# in memory allocation functions by checking the type of the allocated memory
index 611945611bf8352d4831a51c411c2d3d5d7afc59..1dcfb288ee63630e7e73be6fe28f1fd1a3bc5857 100644 (file)
@@ -113,7 +113,8 @@ do_resize:
                        case KEY_DOWN:
                                break;
                        case KEY_BACKSPACE:
-                       case 127:
+                       case 8:   /* ^H */
+                       case 127: /* ^? */
                                if (pos) {
                                        wattrset(dialog, dlg.inputbox.atr);
                                        if (input_x == 0) {
index a4670f4e825a8c779cf4894587b6e7e31f556b02..ac92c0ded6c5c627e974679ef967d4bc37b25a53 100644 (file)
@@ -1048,7 +1048,7 @@ static int do_match(int key, struct match_state *state, int *ans)
                state->match_direction = FIND_NEXT_MATCH_UP;
                *ans = get_mext_match(state->pattern,
                                state->match_direction);
-       } else if (key == KEY_BACKSPACE || key == 127) {
+       } else if (key == KEY_BACKSPACE || key == 8 || key == 127) {
                state->pattern[strlen(state->pattern)-1] = '\0';
                adj_match_dir(&state->match_direction);
        } else
index 7be620a1fcdb8191639aaeaca7b5c6ae421d9769..77f525a8617c27788cc30f9a65c41041050806ed 100644 (file)
@@ -439,7 +439,8 @@ int dialog_inputbox(WINDOW *main_window,
                case KEY_F(F_EXIT):
                case KEY_F(F_BACK):
                        break;
-               case 127:
+               case 8:   /* ^H */
+               case 127: /* ^? */
                case KEY_BACKSPACE:
                        if (cursor_position > 0) {
                                memmove(&result[cursor_position-1],
index 0b0d1080b1c5ef4903a3b87d8fbfbc11b165e739..f277e116e0ebf64e350c636443b2f7c663b599ea 100644 (file)
@@ -639,7 +639,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
                               info->sechdrs[sym->st_shndx].sh_offset -
                               (info->hdr->e_type != ET_REL ?
                                info->sechdrs[sym->st_shndx].sh_addr : 0);
-                       crc = *crcp;
+                       crc = TO_NATIVE(*crcp);
                }
                sym_update_crc(symname + strlen("__crc_"), mod, crc,
                                export);
index 1ceedea847ddfec8ff856fb3a8a2110cd00343c3..544ca126a8a8c5bb057fe69b4e0e153a95b475f9 100644 (file)
@@ -9,7 +9,6 @@
 #include <string.h>
 #include <errno.h>
 #include <ctype.h>
-#include <sys/socket.h>
 
 struct security_class_mapping {
        const char *name;
index 073fe7537f6c00db183cb28aabe40856b3b28690..6d51b74bc679e7084a525c360422c0585af10c34 100644 (file)
@@ -32,7 +32,6 @@
 #include <stdlib.h>
 #include <unistd.h>
 #include <string.h>
-#include <sys/socket.h>
 
 static void usage(char *name)
 {
index 1d6463fb1450c03b8739b102b48b64e05aaa533e..353cfef71d4e9b89f0a71e46748b50dc5ce00c09 100644 (file)
@@ -239,8 +239,46 @@ source "security/safesetid/Kconfig"
 
 source "security/integrity/Kconfig"
 
+choice
+       prompt "First legacy 'major LSM' to be initialized"
+       default DEFAULT_SECURITY_SELINUX if SECURITY_SELINUX
+       default DEFAULT_SECURITY_SMACK if SECURITY_SMACK
+       default DEFAULT_SECURITY_TOMOYO if SECURITY_TOMOYO
+       default DEFAULT_SECURITY_APPARMOR if SECURITY_APPARMOR
+       default DEFAULT_SECURITY_DAC
+
+       help
+         This choice exists only to convert CONFIG_DEFAULT_SECURITY in old
+         kernel configs to CONFIG_LSM in new ones. Don't change it unless
+         you are creating a fresh kernel config, because it is ignored once
+         CONFIG_LSM has been set.
+
+         Selects the legacy "major security module" that will be
+         initialized first. Overridden by a non-default CONFIG_LSM.
+
+       config DEFAULT_SECURITY_SELINUX
+               bool "SELinux" if SECURITY_SELINUX=y
+
+       config DEFAULT_SECURITY_SMACK
+               bool "Simplified Mandatory Access Control" if SECURITY_SMACK=y
+
+       config DEFAULT_SECURITY_TOMOYO
+               bool "TOMOYO" if SECURITY_TOMOYO=y
+
+       config DEFAULT_SECURITY_APPARMOR
+               bool "AppArmor" if SECURITY_APPARMOR=y
+
+       config DEFAULT_SECURITY_DAC
+               bool "Unix Discretionary Access Controls"
+
+endchoice
+
 config LSM
        string "Ordered list of enabled LSMs"
+       default "yama,loadpin,safesetid,integrity,smack,selinux,tomoyo,apparmor" if DEFAULT_SECURITY_SMACK
+       default "yama,loadpin,safesetid,integrity,apparmor,selinux,smack,tomoyo" if DEFAULT_SECURITY_APPARMOR
+       default "yama,loadpin,safesetid,integrity,tomoyo" if DEFAULT_SECURITY_TOMOYO
+       default "yama,loadpin,safesetid,integrity" if DEFAULT_SECURITY_DAC
        default "yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
        help
          A comma-separated list of LSMs, in initialization order.
index fefee040bf79132e03864320d6c5e19b83907094..b9298d2e816547da98c2fd19da931b66fe1f110a 100644 (file)
@@ -123,17 +123,22 @@ static int aafs_show_path(struct seq_file *seq, struct dentry *dentry)
        return 0;
 }
 
-static void aafs_evict_inode(struct inode *inode)
+static void aafs_i_callback(struct rcu_head *head)
 {
-       truncate_inode_pages_final(&inode->i_data);
-       clear_inode(inode);
+       struct inode *inode = container_of(head, struct inode, i_rcu);
        if (S_ISLNK(inode->i_mode))
                kfree(inode->i_link);
+       free_inode_nonrcu(inode);
+}
+
+static void aafs_destroy_inode(struct inode *inode)
+{
+       call_rcu(&inode->i_rcu, aafs_i_callback);
 }
 
 static const struct super_operations aafs_super_ops = {
        .statfs = simple_statfs,
-       .evict_inode = aafs_evict_inode,
+       .destroy_inode = aafs_destroy_inode,
        .show_path = aafs_show_path,
 };
 
index 49d664ddff444810ef9c6e8a1b0276c5ba473c53..87500bde5a92d599ccaa4e892ff12527feac58eb 100644 (file)
@@ -1336,9 +1336,16 @@ module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR);
 bool aa_g_paranoid_load = true;
 module_param_named(paranoid_load, aa_g_paranoid_load, aabool, S_IRUGO);
 
+static int param_get_aaintbool(char *buffer, const struct kernel_param *kp);
+static int param_set_aaintbool(const char *val, const struct kernel_param *kp);
+#define param_check_aaintbool param_check_int
+static const struct kernel_param_ops param_ops_aaintbool = {
+       .set = param_set_aaintbool,
+       .get = param_get_aaintbool
+};
 /* Boot time disable flag */
 static int apparmor_enabled __lsm_ro_after_init = 1;
-module_param_named(enabled, apparmor_enabled, int, 0444);
+module_param_named(enabled, apparmor_enabled, aaintbool, 0444);
 
 static int __init apparmor_enabled_setup(char *str)
 {
@@ -1413,6 +1420,46 @@ static int param_get_aauint(char *buffer, const struct kernel_param *kp)
        return param_get_uint(buffer, kp);
 }
 
+/* Can only be set before AppArmor is initialized (i.e. on boot cmdline). */
+static int param_set_aaintbool(const char *val, const struct kernel_param *kp)
+{
+       struct kernel_param kp_local;
+       bool value;
+       int error;
+
+       if (apparmor_initialized)
+               return -EPERM;
+
+       /* Create local copy, with arg pointing to bool type. */
+       value = !!*((int *)kp->arg);
+       memcpy(&kp_local, kp, sizeof(kp_local));
+       kp_local.arg = &value;
+
+       error = param_set_bool(val, &kp_local);
+       if (!error)
+               *((int *)kp->arg) = *((bool *)kp_local.arg);
+       return error;
+}
+
+/*
+ * To avoid changing /sys/module/apparmor/parameters/enabled from Y/N to
+ * 1/0, this converts the "int that is actually bool" back to bool for
+ * display in the /sys filesystem, while keeping it "int" for the LSM
+ * infrastructure.
+ */
+static int param_get_aaintbool(char *buffer, const struct kernel_param *kp)
+{
+       struct kernel_param kp_local;
+       bool value;
+
+       /* Create local copy, with arg pointing to bool type. */
+       value = !!*((int *)kp->arg);
+       memcpy(&kp_local, kp, sizeof(kp_local));
+       kp_local.arg = &value;
+
+       return param_get_bool(buffer, &kp_local);
+}
+
 static int param_get_audit(char *buffer, const struct kernel_param *kp)
 {
        if (!apparmor_enabled)
index cd97929fac663f61250edeae3397e3ab75b5ff49..dc28914fa72e076405b238225dc54c172df5de40 100644 (file)
@@ -560,7 +560,7 @@ static int propagate_exception(struct dev_cgroup *devcg_root,
                    devcg->behavior == DEVCG_DEFAULT_ALLOW) {
                        rc = dev_exception_add(devcg, ex);
                        if (rc)
-                               break;
+                               return rc;
                } else {
                        /*
                         * in the other possible cases:
index b7772a9b315eea144fd24afef873b7e54519d7f9..421dd72b5876720d70642f1d7d2d421a2e0d5e75 100644 (file)
 static struct vfsmount *mount;
 static int mount_count;
 
-static void securityfs_evict_inode(struct inode *inode)
+static void securityfs_i_callback(struct rcu_head *head)
 {
-       truncate_inode_pages_final(&inode->i_data);
-       clear_inode(inode);
+       struct inode *inode = container_of(head, struct inode, i_rcu);
        if (S_ISLNK(inode->i_mode))
                kfree(inode->i_link);
+       free_inode_nonrcu(inode);
+}
+
+static void securityfs_destroy_inode(struct inode *inode)
+{
+       call_rcu(&inode->i_rcu, securityfs_i_callback);
 }
 
 static const struct super_operations securityfs_super_operations = {
        .statfs         = simple_statfs,
-       .evict_inode    = securityfs_evict_inode,
+       .destroy_inode  = securityfs_destroy_inode,
 };
 
 static int fill_super(struct super_block *sb, void *data, int silent)
index bcc9c6ead7fd30962cf2ac3755e61d011d62c3da..efdbf17f3915259ea34ed76f5d452a4763236043 100644 (file)
@@ -125,7 +125,7 @@ out:
  */
 int TSS_authhmac(unsigned char *digest, const unsigned char *key,
                        unsigned int keylen, unsigned char *h1,
-                       unsigned char *h2, unsigned char h3, ...)
+                       unsigned char *h2, unsigned int h3, ...)
 {
        unsigned char paramdigest[SHA1_DIGEST_SIZE];
        struct sdesc *sdesc;
@@ -135,13 +135,16 @@ int TSS_authhmac(unsigned char *digest, const unsigned char *key,
        int ret;
        va_list argp;
 
+       if (!chip)
+               return -ENODEV;
+
        sdesc = init_sdesc(hashalg);
        if (IS_ERR(sdesc)) {
                pr_info("trusted_key: can't alloc %s\n", hash_alg);
                return PTR_ERR(sdesc);
        }
 
-       c = h3;
+       c = !!h3;
        ret = crypto_shash_init(&sdesc->shash);
        if (ret < 0)
                goto out;
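
The prototype change from "unsigned char h3" to "unsigned int h3" matters because C applies default argument promotions: if the last named parameter of a variadic function has a promotable type (char, short, float), va_start() on it is undefined behavior. The "c = !!h3" line then folds the widened value back to the 0/1 continue flag. A standalone illustration of the rule (not the kernel function):

#include <stdarg.h>
#include <stdio.h>

static unsigned int last_flag(unsigned int flag, ...)
{
        va_list ap;

        va_start(ap, flag);     /* well-defined: flag is not promoted */
        va_end(ap);
        return !!flag;          /* normalize to 0/1, as "c = !!h3" does */
}

int main(void)
{
        printf("%u\n", last_flag(200u));        /* prints 1 */
        return 0;
}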
@@ -196,6 +199,9 @@ int TSS_checkhmac1(unsigned char *buffer,
        va_list argp;
        int ret;
 
+       if (!chip)
+               return -ENODEV;
+
        bufsize = LOAD32(buffer, TPM_SIZE_OFFSET);
        tag = LOAD16(buffer, 0);
        ordinal = command;
@@ -363,6 +369,9 @@ int trusted_tpm_send(unsigned char *cmd, size_t buflen)
 {
        int rc;
 
+       if (!chip)
+               return -ENODEV;
+
        dump_tpm_buf(cmd);
        rc = tpm_send(chip, cmd, buflen);
        dump_tpm_buf(cmd);
@@ -429,6 +438,9 @@ int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
 {
        int ret;
 
+       if (!chip)
+               return -ENODEV;
+
        INIT_BUF(tb);
        store16(tb, TPM_TAG_RQU_COMMAND);
        store32(tb, TPM_OIAP_SIZE);
@@ -1245,9 +1257,13 @@ static int __init init_trusted(void)
 {
        int ret;
 
+       /* encrypted_keys.ko depends on successful load of this module even if
+        * TPM is not used.
+        */
        chip = tpm_default_chip();
        if (!chip)
-               return -ENOENT;
+               return 0;
+
        ret = init_digests();
        if (ret < 0)
                goto err_put;
@@ -1269,10 +1285,12 @@ err_put:
 
 static void __exit cleanup_trusted(void)
 {
-       put_device(&chip->dev);
-       kfree(digests);
-       trusted_shash_release();
-       unregister_key_type(&key_type_trusted);
+       if (chip) {
+               put_device(&chip->dev);
+               kfree(digests);
+               trusted_shash_release();
+               unregister_key_type(&key_type_trusted);
+       }
 }
 
 late_initcall(init_trusted);
index bd5fe0d3204ae98b67b234a1459f01f01ddc1d78..201f7e588a29d2cdaf44feb3dfa1543585b7cac4 100644 (file)
@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/capability.h>
+#include <linux/socket.h>
 
 #define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \
     "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append", "map"
index 6b576e58872550b991e9ae941999f9a3ffb968b7..daecdfb15a9cf91ef789ecafb76240d706f4c543 100644 (file)
@@ -828,9 +828,11 @@ void policydb_destroy(struct policydb *p)
        hashtab_map(p->range_tr, range_tr_destroy, NULL);
        hashtab_destroy(p->range_tr);
 
-       for (i = 0; i < p->p_types.nprim; i++)
-               ebitmap_destroy(&p->type_attr_map_array[i]);
-       kvfree(p->type_attr_map_array);
+       if (p->type_attr_map_array) {
+               for (i = 0; i < p->p_types.nprim; i++)
+                       ebitmap_destroy(&p->type_attr_map_array[i]);
+               kvfree(p->type_attr_map_array);
+       }
 
        ebitmap_destroy(&p->filename_trans_ttypes);
        ebitmap_destroy(&p->policycaps);
@@ -2496,10 +2498,13 @@ int policydb_read(struct policydb *p, void *fp)
        if (!p->type_attr_map_array)
                goto bad;
 
+       /* just in case ebitmap_init() becomes more than just a memset(0): */
+       for (i = 0; i < p->p_types.nprim; i++)
+               ebitmap_init(&p->type_attr_map_array[i]);
+
        for (i = 0; i < p->p_types.nprim; i++) {
                struct ebitmap *e = &p->type_attr_map_array[i];
 
-               ebitmap_init(e);
                if (p->policyvers >= POLICYDB_VERSION_AVTAB) {
                        rc = ebitmap_read(e, fp);
                        if (rc)
index 57cc60722dd3855021c56a3e46d900e1f0ad0efe..efac68556b4571e0ebef345935cdba035a13a285 100644 (file)
@@ -206,7 +206,7 @@ static void yama_ptracer_del(struct task_struct *tracer,
  * yama_task_free - check for task_pid to remove from exception list
  * @task: task being removed
  */
-void yama_task_free(struct task_struct *task)
+static void yama_task_free(struct task_struct *task)
 {
        yama_ptracer_del(task, task);
 }
@@ -222,7 +222,7 @@ void yama_task_free(struct task_struct *task)
  * Return 0 on success, -ve on error.  -ENOSYS is returned when Yama
  * does not handle the given option.
  */
-int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
                           unsigned long arg4, unsigned long arg5)
 {
        int rc = -ENOSYS;
@@ -401,7 +401,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
  *
  * Returns 0 if following the ptrace is allowed, -ve on error.
  */
-int yama_ptrace_traceme(struct task_struct *parent)
+static int yama_ptrace_traceme(struct task_struct *parent)
 {
        int rc = 0;
 
@@ -452,7 +452,7 @@ static int yama_dointvec_minmax(struct ctl_table *table, int write,
 static int zero;
 static int max_scope = YAMA_SCOPE_NO_ATTACH;
 
-struct ctl_path yama_sysctl_path[] = {
+static struct ctl_path yama_sysctl_path[] = {
        { .procname = "kernel", },
        { .procname = "yama", },
        { }
index 96a074019c33c28b5587d7d833110eced98669c0..0eb169acc85031f5f2a5bdc8e3e7b9b3b66a97b7 100644 (file)
@@ -713,8 +713,11 @@ snd_info_create_entry(const char *name, struct snd_info_entry *parent,
        INIT_LIST_HEAD(&entry->list);
        entry->parent = parent;
        entry->module = module;
-       if (parent)
+       if (parent) {
+               mutex_lock(&parent->access);
                list_add_tail(&entry->list, &parent->children);
+               mutex_unlock(&parent->access);
+       }
        return entry;
 }
 
@@ -792,7 +795,12 @@ void snd_info_free_entry(struct snd_info_entry * entry)
        list_for_each_entry_safe(p, n, &entry->children, list)
                snd_info_free_entry(p);
 
-       list_del(&entry->list);
+       p = entry->parent;
+       if (p) {
+               mutex_lock(&p->access);
+               list_del(&entry->list);
+               mutex_unlock(&p->access);
+       }
        kfree(entry->name);
        if (entry->private_free)
                entry->private_free(entry);
index 0c4dc40376a709ff2e8aabd2f9ac4d25660389be..079c12d64b0e3112361ab2a4497df41155aa7f62 100644 (file)
@@ -382,14 +382,7 @@ int snd_card_disconnect(struct snd_card *card)
        card->shutdown = 1;
        spin_unlock(&card->files_lock);
 
-       /* phase 1: disable fops (user space) operations for ALSA API */
-       mutex_lock(&snd_card_mutex);
-       snd_cards[card->number] = NULL;
-       clear_bit(card->number, snd_cards_lock);
-       mutex_unlock(&snd_card_mutex);
-       
-       /* phase 2: replace file->f_op with special dummy operations */
-       
+       /* replace file->f_op with special dummy operations */
        spin_lock(&card->files_lock);
        list_for_each_entry(mfile, &card->files_list, list) {
                /* it's critical part, use endless loop */
@@ -405,7 +398,7 @@ int snd_card_disconnect(struct snd_card *card)
        }
        spin_unlock(&card->files_lock); 
 
-       /* phase 3: notify all connected devices about disconnection */
+       /* notify all connected devices about disconnection */
        /* at this point, they cannot respond to any calls except release() */
 
 #if IS_ENABLED(CONFIG_SND_MIXER_OSS)
@@ -421,6 +414,13 @@ int snd_card_disconnect(struct snd_card *card)
                device_del(&card->card_dev);
                card->registered = false;
        }
+
+       /* disable fops (user space) operations for ALSA API */
+       mutex_lock(&snd_card_mutex);
+       snd_cards[card->number] = NULL;
+       clear_bit(card->number, snd_cards_lock);
+       mutex_unlock(&snd_card_mutex);
+
 #ifdef CONFIG_PM
        wake_up(&card->power_sleep);
 #endif
index d5b0d7ba83c4204db42df492a5e35f54a67c470c..f6ae68017608d83cedc394cd5c11891439e8cbec 100644 (file)
@@ -940,6 +940,28 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
        oss_frame_size = snd_pcm_format_physical_width(params_format(params)) *
                         params_channels(params) / 8;
 
+       err = snd_pcm_oss_period_size(substream, params, sparams);
+       if (err < 0)
+               goto failure;
+
+       n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
+       err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
+       if (err < 0)
+               goto failure;
+
+       err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
+                                    runtime->oss.periods, NULL);
+       if (err < 0)
+               goto failure;
+
+       snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+
+       err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams);
+       if (err < 0) {
+               pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
+               goto failure;
+       }
+
 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
        snd_pcm_oss_plugin_clear(substream);
        if (!direct) {
@@ -974,27 +996,6 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
        }
 #endif
 
-       err = snd_pcm_oss_period_size(substream, params, sparams);
-       if (err < 0)
-               goto failure;
-
-       n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
-       err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
-       if (err < 0)
-               goto failure;
-
-       err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
-                                    runtime->oss.periods, NULL);
-       if (err < 0)
-               goto failure;
-
-       snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
-
-       if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams)) < 0) {
-               pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
-               goto failure;
-       }
-
        if (runtime->oss.trigger) {
                sw_params->start_threshold = 1;
        } else {
index f731f904e8ccb4e9671523e3b68e7825c779d8d8..1d8452912b14af7b211acc8796d1526d936007a2 100644 (file)
@@ -1445,8 +1445,15 @@ static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
 static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
-       if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
+       switch (runtime->status->state) {
+       case SNDRV_PCM_STATE_SUSPENDED:
                return -EBUSY;
+       /* unresumable PCM state; return -EBUSY to skip suspend */
+       case SNDRV_PCM_STATE_OPEN:
+       case SNDRV_PCM_STATE_SETUP:
+       case SNDRV_PCM_STATE_DISCONNECTED:
+               return -EBUSY;
+       }
        runtime->trigger_master = substream;
        return 0;
 }
index ee601d7f092694aecd7e853845b4e3f73cd0f261..c0690d1ecd55c1ce33c9bc155abd82d2d0ec9edd 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/mm.h>
+#include <linux/nospec.h>
 #include <sound/rawmidi.h>
 #include <sound/info.h>
 #include <sound/control.h>
@@ -601,6 +602,7 @@ static int __snd_rawmidi_info_select(struct snd_card *card,
                return -ENXIO;
        if (info->stream < 0 || info->stream > 1)
                return -EINVAL;
+       info->stream = array_index_nospec(info->stream, 2);
        pstr = &rmidi->streams[info->stream];
        if (pstr->substream_count == 0)
                return -ENOENT;
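
array_index_nospec() clamps an already bounds-checked, user-controlled index so the CPU cannot use an out-of-range value speculatively past the check (Spectre v1). A kernel-style sketch of the pattern (not standalone; names follow the hunk, but the helper itself is illustrative):

#include <linux/nospec.h>

/* Illustrative helper: sanitize a user-supplied stream index (0 or 1). */
static struct snd_rawmidi_str *pick_stream(struct snd_rawmidi *rmidi, int idx)
{
        if (idx < 0 || idx > 1)
                return NULL;                    /* architectural bounds check */
        idx = array_index_nospec(idx, 2);       /* speculative clamp */
        return &rmidi->streams[idx];
}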
index 278ebb9931225998dd07f0606eeabe289d71aff5..c939459172353dee5ee651ee4694f43f2aa9be7d 100644 (file)
@@ -617,13 +617,14 @@ int
 snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf)
 {
        struct seq_oss_synth *rec;
+       struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
 
-       if (dev < 0 || dev >= dp->max_synthdev)
+       if (!info)
                return -ENXIO;
 
-       if (dp->synths[dev].is_midi) {
+       if (info->is_midi) {
                struct midi_info minf;
-               snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf);
+               snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
                inf->synth_type = SYNTH_TYPE_MIDI;
                inf->synth_subtype = 0;
                inf->nr_voices = 16;
index 7d4640d1fe9fb8a8ab8eecf045798497cb3e38f3..38e7deab638479ef9525c67fcd4f0fe37101cfb2 100644 (file)
@@ -1252,7 +1252,7 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
 
        /* fill the info fields */
        if (client_info->name[0])
-               strlcpy(client->name, client_info->name, sizeof(client->name));
+               strscpy(client->name, client_info->name, sizeof(client->name));
 
        client->filter = client_info->filter;
        client->event_lost = client_info->event_lost;
@@ -1530,7 +1530,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
        /* set queue name */
        if (!info->name[0])
                snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
-       strlcpy(q->name, info->name, sizeof(q->name));
+       strscpy(q->name, info->name, sizeof(q->name));
        snd_use_lock_free(&q->use_lock);
 
        return 0;
@@ -1592,7 +1592,7 @@ static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
                queuefree(q);
                return -EPERM;
        }
-       strlcpy(q->name, info->name, sizeof(q->name));
+       strscpy(q->name, info->name, sizeof(q->name));
        queuefree(q);
 
        return 0;
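
strscpy() is preferred over strlcpy() because it never reads the source past
the destination size and reports truncation directly: it returns the number
of bytes copied, or -E2BIG when the source did not fit. A minimal usage
sketch:

    char name[16];
    ssize_t n = strscpy(name, src, sizeof(name));

    if (n == -E2BIG)                /* truncated, but still NUL-terminated */
            pr_debug("name was truncated\n");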
index 5b02bd49fde407a5f020eb44cf09a88d5413f37e..4e4ecc21760bccd5a7da41d275c53682461ae60f 100644 (file)
@@ -41,7 +41,7 @@ void snd_opl3_timer_func(struct timer_list *t);
 
 /* Prototypes for opl3_drums.c */
 void snd_opl3_load_drums(struct snd_opl3 *opl3);
-void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int on_off, int vel, struct snd_midi_channel *chan);
+void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int vel, int on_off, struct snd_midi_channel *chan);
 
 /* Prototypes for opl3_oss.c */
 #if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
index 220e61926ea4193c02f66e3d6b8265d15be8d28b..513291ba0ab072d5e478ef936eda27d575e11c91 100644 (file)
@@ -36,7 +36,7 @@ static void name_card(struct snd_motu *motu)
        fw_csr_iterator_init(&it, motu->unit->directory);
        while (fw_csr_iterator_next(&it, &key, &val)) {
                switch (key) {
-               case CSR_VERSION:
+               case CSR_MODEL:
                        version = val;
                        break;
                }
@@ -46,7 +46,7 @@ static void name_card(struct snd_motu *motu)
        strcpy(motu->card->shortname, motu->spec->name);
        strcpy(motu->card->mixername, motu->spec->name);
        snprintf(motu->card->longname, sizeof(motu->card->longname),
-                "MOTU %s (version:%d), GUID %08x%08x at %s, S%d",
+                "MOTU %s (version:%06x), GUID %08x%08x at %s, S%d",
                 motu->spec->name, version,
                 fw_dev->config_rom[3], fw_dev->config_rom[4],
                 dev_name(&motu->unit->device), 100 << fw_dev->max_speed);
@@ -237,20 +237,20 @@ static const struct snd_motu_spec motu_audio_express = {
 #define SND_MOTU_DEV_ENTRY(model, data)                        \
 {                                                      \
        .match_flags    = IEEE1394_MATCH_VENDOR_ID |    \
-                         IEEE1394_MATCH_MODEL_ID |     \
-                         IEEE1394_MATCH_SPECIFIER_ID,  \
+                         IEEE1394_MATCH_SPECIFIER_ID | \
+                         IEEE1394_MATCH_VERSION,       \
        .vendor_id      = OUI_MOTU,                     \
-       .model_id       = model,                        \
        .specifier_id   = OUI_MOTU,                     \
+       .version        = model,                        \
        .driver_data    = (kernel_ulong_t)data,         \
 }
 
 static const struct ieee1394_device_id motu_id_table[] = {
-       SND_MOTU_DEV_ENTRY(0x101800, &motu_828mk2),
-       SND_MOTU_DEV_ENTRY(0x107800, &snd_motu_spec_traveler),
-       SND_MOTU_DEV_ENTRY(0x106800, &motu_828mk3),     /* FireWire only. */
-       SND_MOTU_DEV_ENTRY(0x100800, &motu_828mk3),     /* Hybrid. */
-       SND_MOTU_DEV_ENTRY(0x104800, &motu_audio_express),
+       SND_MOTU_DEV_ENTRY(0x000003, &motu_828mk2),
+       SND_MOTU_DEV_ENTRY(0x000009, &snd_motu_spec_traveler),
+       SND_MOTU_DEV_ENTRY(0x000015, &motu_828mk3),     /* FireWire only. */
+       SND_MOTU_DEV_ENTRY(0x000035, &motu_828mk3),     /* Hybrid. */
+       SND_MOTU_DEV_ENTRY(0x000033, &motu_audio_express),
        { }
 };
 MODULE_DEVICE_TABLE(ieee1394, motu_id_table);
index 9c37d9af3023f67bdba2ba4a3d207580cd1f70c5..ec7715c6b0c02c9bc940ed4b16f387b509bd1907 100644 (file)
@@ -107,7 +107,6 @@ int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
        INIT_LIST_HEAD(&bus->hlink_list);
        bus->idx = idx++;
 
-       mutex_init(&bus->lock);
        bus->cmd_dma_state = true;
 
        return 0;
index 012305177f68227af7bb25890a92c1ef93690221..ad8eee08013fb838e228daaa23cd974a92c34325 100644 (file)
@@ -38,6 +38,7 @@ int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
        INIT_WORK(&bus->unsol_work, snd_hdac_bus_process_unsol_events);
        spin_lock_init(&bus->reg_lock);
        mutex_init(&bus->cmd_mutex);
+       mutex_init(&bus->lock);
        bus->irq = -1;
        return 0;
 }
index 5c95933e739a43bc5cd30829e43c0381cff1e989..1ea51e3b942a034a1b487bb2ad7dc054893a4d39 100644 (file)
@@ -69,13 +69,15 @@ void snd_hdac_display_power(struct hdac_bus *bus, unsigned int idx, bool enable)
 
        dev_dbg(bus->dev, "display power %s\n",
                enable ? "enable" : "disable");
+
+       mutex_lock(&bus->lock);
        if (enable)
                set_bit(idx, &bus->display_power_status);
        else
                clear_bit(idx, &bus->display_power_status);
 
        if (!acomp || !acomp->ops)
-               return;
+               goto unlock;
 
        if (bus->display_power_status) {
                if (!bus->display_power_active) {
@@ -92,6 +94,8 @@ void snd_hdac_display_power(struct hdac_bus *bus, unsigned int idx, bool enable)
                        bus->display_power_active = false;
                }
        }
+ unlock:
+       mutex_unlock(&bus->lock);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_display_power);
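
With bus->lock now initialized in the core snd_hdac_bus_init() (moved out of
the ext variant above), snd_hdac_display_power() can serialize the
read-modify-write of display_power_status and display_power_active against
concurrent callers. A hypothetical codec-side callsite, assuming idx simply
identifies the requesting user so the bus can refcount display power:

    snd_hdac_display_power(bus, codec->addr, true);   /* before touching i915 */
    /* ... HDMI/DP register access ... */
    snd_hdac_display_power(bus, codec->addr, false);  /* when done */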
 
index aa2a83eb81a988a5e5947d9edce8afae4c91cf6f..dc27a480c2d9bf8156a0376cd0f1788eb34df2b3 100644 (file)
@@ -111,6 +111,10 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
 
        /* block the 0x388 port to avoid PnP conflicts */
        acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
+       if (!acard->fm_res) {
+               err = -EBUSY;
+               goto _err;
+       }
 
        if (port[dev] != SNDRV_AUTO_PORT) {
                if ((err = snd_sbdsp_create(card, port[dev], irq[dev],
index ea876b0b02b9f0d8400a7ee08291ff283999dbc6..dc0084dc8550daae4412f75e7bfc487c69c98beb 100644 (file)
@@ -1952,6 +1952,11 @@ static int snd_echo_create(struct snd_card *card,
        }
        chip->dsp_registers = (volatile u32 __iomem *)
                ioremap_nocache(chip->dsp_registers_phys, sz);
+       if (!chip->dsp_registers) {
+               dev_err(chip->card->dev, "ioremap failed\n");
+               snd_echo_free(chip);
+               return -ENOMEM;
+       }
 
        if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED,
                        KBUILD_MODNAME, chip)) {
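
Like the request_region() check in the previous hunk, this closes an
unchecked resource acquisition: ioremap_nocache() can return NULL, and
dereferencing the unchecked pointer would oops. The defensive pattern in
isolation:

    void __iomem *regs = ioremap_nocache(phys_base, size);

    if (!regs)
            return -ENOMEM;   /* never touch regs without this check */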
index 5f2005098a60bed87fd16248c295aaff66b1c67d..701a69d856f5ff7acfb9e264e58abf5cf9f3215e 100644 (file)
@@ -969,6 +969,7 @@ int snd_hda_codec_device_new(struct hda_bus *bus, struct snd_card *card,
 
        /* power-up all before initialization */
        hda_set_power_state(codec, AC_PWRST_D0);
+       codec->core.dev.power.power_state = PMSG_ON;
 
        snd_hda_codec_proc_new(codec);
 
@@ -2939,6 +2940,20 @@ static int hda_codec_runtime_resume(struct device *dev)
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_SLEEP
+static int hda_codec_force_resume(struct device *dev)
+{
+       int ret;
+
+       /* The get/put pair below forces a runtime resume even if the
+        * device wasn't in use at suspend time.  This trick is needed so
+        * that a jack state change which happened during sleep is still
+        * picked up on resume.
+        */
+       pm_runtime_get_noresume(dev);
+       ret = pm_runtime_force_resume(dev);
+       pm_runtime_put(dev);
+       return ret;
+}
+
 static int hda_codec_pm_suspend(struct device *dev)
 {
        dev->power.power_state = PMSG_SUSPEND;
@@ -2948,7 +2963,7 @@ static int hda_codec_pm_suspend(struct device *dev)
 static int hda_codec_pm_resume(struct device *dev)
 {
        dev->power.power_state = PMSG_RESUME;
-       return pm_runtime_force_resume(dev);
+       return hda_codec_force_resume(dev);
 }
 
 static int hda_codec_pm_freeze(struct device *dev)
@@ -2960,13 +2975,13 @@ static int hda_codec_pm_freeze(struct device *dev)
 static int hda_codec_pm_thaw(struct device *dev)
 {
        dev->power.power_state = PMSG_THAW;
-       return pm_runtime_force_resume(dev);
+       return hda_codec_force_resume(dev);
 }
 
 static int hda_codec_pm_restore(struct device *dev)
 {
        dev->power.power_state = PMSG_RESTORE;
-       return pm_runtime_force_resume(dev);
+       return hda_codec_force_resume(dev);
 }
 #endif /* CONFIG_PM_SLEEP */
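
pm_runtime_get_noresume() only bumps the usage counter, so the following
pm_runtime_force_resume() sees the device as "in use" and really resumes it;
pm_runtime_put() then drops the reference again. The three sleep callbacks
changed above funnel into this helper, plausibly wired up as dev_pm_ops like
this (structure and field names assumed for illustration, not quoted from
the patch):

    static const struct dev_pm_ops hda_codec_driver_pm = {
            .suspend = hda_codec_pm_suspend,
            .resume  = hda_codec_pm_resume,
            .freeze  = hda_codec_pm_freeze,
            .thaw    = hda_codec_pm_thaw,
            .restore = hda_codec_pm_restore,
    };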
 
index e5c49003e75fdd81a62fbea142089500d0eb6eae..2ec91085fa3e7708d27a605747213f2277b9bc2b 100644 (file)
@@ -947,7 +947,7 @@ static void __azx_runtime_suspend(struct azx *chip)
        display_power(chip, false);
 }
 
-static void __azx_runtime_resume(struct azx *chip)
+static void __azx_runtime_resume(struct azx *chip, bool from_rt)
 {
        struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
        struct hdac_bus *bus = azx_bus(chip);
@@ -964,7 +964,7 @@ static void __azx_runtime_resume(struct azx *chip)
        azx_init_pci(chip);
        hda_intel_init_chip(chip, true);
 
-       if (status) {
+       if (status && from_rt) {
                list_for_each_codec(codec, &chip->bus)
                        if (status & (1 << codec->addr))
                                schedule_delayed_work(&codec->jackpoll_work,
@@ -1016,7 +1016,7 @@ static int azx_resume(struct device *dev)
                        chip->msi = 0;
        if (azx_acquire_irq(chip, 1) < 0)
                return -EIO;
-       __azx_runtime_resume(chip);
+       __azx_runtime_resume(chip, false);
        snd_power_change_state(card, SNDRV_CTL_POWER_D0);
 
        trace_azx_resume(chip);
@@ -1081,7 +1081,7 @@ static int azx_runtime_resume(struct device *dev)
        chip = card->private_data;
        if (!azx_has_pm_runtime(chip))
                return 0;
-       __azx_runtime_resume(chip);
+       __azx_runtime_resume(chip, true);
 
        /* disable controller Wake Up event*/
        azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
@@ -2142,12 +2142,18 @@ static struct snd_pci_quirk power_save_blacklist[] = {
        SND_PCI_QUIRK(0x8086, 0x2040, "Intel DZ77BH-55K", 0),
        /* https://bugzilla.kernel.org/show_bug.cgi?id=199607 */
        SND_PCI_QUIRK(0x8086, 0x2057, "Intel NUC5i7RYB", 0),
+       /* https://bugs.launchpad.net/bugs/1821663 */
+       SND_PCI_QUIRK(0x8086, 0x2064, "Intel SDP 8086:2064", 0),
        /* https://bugzilla.redhat.com/show_bug.cgi?id=1520902 */
        SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0),
-       /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
-       SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
        /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
        SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
+       /* https://bugzilla.redhat.com/show_bug.cgi?id=1689623 */
+       SND_PCI_QUIRK(0x17aa, 0x367b, "Lenovo IdeaCentre B550", 0),
+       /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
+       SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
+       /* https://bugs.launchpad.net/bugs/1821663 */
+       SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
        {}
 };
 #endif /* CONFIG_PM */
index 29882bda763289374069ec4777e62b781b416673..e1ebc6d5f38226b10f689b2bc04fd0504331ced2 100644 (file)
@@ -1005,7 +1005,6 @@ struct ca0132_spec {
        unsigned int scp_resp_header;
        unsigned int scp_resp_data[4];
        unsigned int scp_resp_count;
-       bool alt_firmware_present;
        bool startup_check_entered;
        bool dsp_reload;
 
@@ -7518,7 +7517,7 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec)
        bool dsp_loaded = false;
        struct ca0132_spec *spec = codec->spec;
        const struct dsp_image_seg *dsp_os_image;
-       const struct firmware *fw_entry;
+       const struct firmware *fw_entry = NULL;
        /*
         * Alternate firmwares for different variants. The Recon3Di apparently
         * can use the default firmware, but I'll leave the option in case
@@ -7529,33 +7528,26 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec)
        case QUIRK_R3D:
        case QUIRK_AE5:
                if (request_firmware(&fw_entry, DESKTOP_EFX_FILE,
-                                       codec->card->dev) != 0) {
+                                       codec->card->dev) != 0)
                        codec_dbg(codec, "Desktop firmware not found.");
-                       spec->alt_firmware_present = false;
-               } else {
+               else
                        codec_dbg(codec, "Desktop firmware selected.");
-                       spec->alt_firmware_present = true;
-               }
                break;
        case QUIRK_R3DI:
                if (request_firmware(&fw_entry, R3DI_EFX_FILE,
-                                       codec->card->dev) != 0) {
+                                       codec->card->dev) != 0)
                        codec_dbg(codec, "Recon3Di alt firmware not detected.");
-                       spec->alt_firmware_present = false;
-               } else {
+               else
                        codec_dbg(codec, "Recon3Di firmware selected.");
-                       spec->alt_firmware_present = true;
-               }
                break;
        default:
-               spec->alt_firmware_present = false;
                break;
        }
        /*
         * Use default ctefx.bin if no alt firmware is detected, or if none
         * exists for your particular codec.
         */
-       if (!spec->alt_firmware_present) {
+       if (!fw_entry) {
                codec_dbg(codec, "Default firmware selected.");
                if (request_firmware(&fw_entry, EFX_FILE,
                                        codec->card->dev) != 0)
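
Initializing fw_entry to NULL lets the pointer itself record whether an
alternate firmware was loaded, making the separate alt_firmware_present flag
redundant. The fallback pattern, sketched with hypothetical file names:

    const struct firmware *fw = NULL;

    if (request_firmware(&fw, ALT_FW_FILE, dev) != 0)
            dev_dbg(dev, "alt firmware not found\n");
    /* fw is still NULL here iff no alternate image was loaded */
    if (!fw && request_firmware(&fw, DEFAULT_FW_FILE, dev) != 0)
            return false;   /* not even the default image exists */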
index 384719d5c44ec89158e781b63a912734e3d24216..42cd3945e0dee563a62cb1c9609b8c7d65b67a84 100644 (file)
@@ -1864,8 +1864,8 @@ enum {
        ALC887_FIXUP_BASS_CHMAP,
        ALC1220_FIXUP_GB_DUAL_CODECS,
        ALC1220_FIXUP_CLEVO_P950,
-       ALC1220_FIXUP_SYSTEM76_ORYP5,
-       ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
+       ALC1220_FIXUP_CLEVO_PB51ED,
+       ALC1220_FIXUP_CLEVO_PB51ED_PINS,
 };
 
 static void alc889_fixup_coef(struct hda_codec *codec,
@@ -2070,7 +2070,7 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
 static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
                                const struct hda_fixup *fix, int action);
 
-static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
+static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec,
                                     const struct hda_fixup *fix,
                                     int action)
 {
@@ -2322,18 +2322,18 @@ static const struct hda_fixup alc882_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc1220_fixup_clevo_p950,
        },
-       [ALC1220_FIXUP_SYSTEM76_ORYP5] = {
+       [ALC1220_FIXUP_CLEVO_PB51ED] = {
                .type = HDA_FIXUP_FUNC,
-               .v.func = alc1220_fixup_system76_oryp5,
+               .v.func = alc1220_fixup_clevo_pb51ed,
        },
-       [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
+       [ALC1220_FIXUP_CLEVO_PB51ED_PINS] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
                        { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
                        {}
                },
                .chained = true,
-               .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
+               .chain_id = ALC1220_FIXUP_CLEVO_PB51ED,
        },
 };
 
@@ -2411,8 +2411,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
-       SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
-       SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
+       SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x65d1, "Tuxedo Book XC1509", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
        SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
        SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
@@ -5449,6 +5450,8 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
                return;
 
        spec->gen.preferred_dacs = preferred_pairs;
+       spec->gen.auto_mute_via_amp = 1;
+       codec->power_save_node = 0;
 }
 
 /* The DAC of NID 0x3 will introduce click/pop noise on headphones, so invalidate it */
@@ -5491,7 +5494,7 @@ static void alc_headset_btn_callback(struct hda_codec *codec,
        jack->jack->button_state = report;
 }
 
-static void alc295_fixup_chromebook(struct hda_codec *codec,
+static void alc_fixup_headset_jack(struct hda_codec *codec,
                                    const struct hda_fixup *fix, int action)
 {
 
@@ -5501,16 +5504,6 @@ static void alc295_fixup_chromebook(struct hda_codec *codec,
                                                    alc_headset_btn_callback);
                snd_hda_jack_add_kctl(codec, 0x55, "Headset Jack", false,
                                      SND_JACK_HEADSET, alc_headset_btn_keymap);
-               switch (codec->core.vendor_id) {
-               case 0x10ec0295:
-                       alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
-                       alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
-                       break;
-               case 0x10ec0236:
-                       alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
-                       alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
-                       break;
-               }
                break;
        case HDA_FIXUP_ACT_INIT:
                switch (codec->core.vendor_id) {
@@ -5531,6 +5524,25 @@ static void alc295_fixup_chromebook(struct hda_codec *codec,
        }
 }
 
+static void alc295_fixup_chromebook(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       switch (action) {
+       case HDA_FIXUP_ACT_INIT:
+               switch (codec->core.vendor_id) {
+               case 0x10ec0295:
+                       alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
+                       alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
+                       break;
+               case 0x10ec0236:
+                       alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
+                       alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
+                       break;
+               }
+               break;
+       }
+}
+
 static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
                                  const struct hda_fixup *fix, int action)
 {
@@ -5663,6 +5675,7 @@ enum {
        ALC233_FIXUP_ASUS_MIC_NO_PRESENCE,
        ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE,
        ALC233_FIXUP_LENOVO_MULTI_CODECS,
+       ALC233_FIXUP_ACER_HEADSET_MIC,
        ALC294_FIXUP_LENOVO_MIC_LOCATION,
        ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE,
        ALC700_FIXUP_INTEL_REFERENCE,
@@ -5684,9 +5697,13 @@ enum {
        ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
        ALC255_FIXUP_ACER_HEADSET_MIC,
        ALC295_FIXUP_CHROME_BOOK,
+       ALC225_FIXUP_HEADSET_JACK,
        ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE,
        ALC225_FIXUP_WYSE_AUTO_MUTE,
        ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
+       ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
+       ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+       ALC299_FIXUP_PREDATOR_SPK,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6487,6 +6504,16 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc233_alc662_fixup_lenovo_dual_codecs,
        },
+       [ALC233_FIXUP_ACER_HEADSET_MIC] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC233_FIXUP_ASUS_MIC_NO_PRESENCE
+       },
        [ALC294_FIXUP_LENOVO_MIC_LOCATION] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -6632,6 +6659,12 @@ static const struct hda_fixup alc269_fixups[] = {
        [ALC295_FIXUP_CHROME_BOOK] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc295_fixup_chromebook,
+               .chained = true,
+               .chain_id = ALC225_FIXUP_HEADSET_JACK
+       },
+       [ALC225_FIXUP_HEADSET_JACK] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_headset_jack,
        },
        [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = {
                .type = HDA_FIXUP_PINS,
@@ -6685,6 +6718,32 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
        },
+       [ALC286_FIXUP_ACER_AIO_HEADSET_MIC] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x4f },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x5029 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
+       },
+       [ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+       },
+       [ALC299_FIXUP_PREDATOR_SPK] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x21, 0x90170150 }, /* use as headset mic, without its own jack detect */
+                       { }
+               }
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6701,9 +6760,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
        SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
-       SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
+       SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
        SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
@@ -7099,7 +7163,9 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC255_FIXUP_DUMMY_LINEOUT_VERB, .name = "alc255-dummy-lineout"},
        {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
        {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
-       {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-sense-combo"},
+       {.id = ALC225_FIXUP_HEADSET_JACK, .name = "alc-headset-jack"},
+       {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-chrome-book"},
+       {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
        {}
 };
 #define ALC225_STANDARD_PINS \
@@ -7202,6 +7268,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x12, 0x90a60140},
                {0x14, 0x90170150},
                {0x21, 0x02211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x21, 0x02211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x40000000},
+               {0x14, 0x90170110},
+               {0x21, 0x02211020}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
                {0x14, 0x90170110},
                {0x21, 0x02211020}),
@@ -7312,6 +7384,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x21, 0x0221101f}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC256_STANDARD_PINS),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x14, 0x90170110},
+               {0x1b, 0x01011020},
+               {0x21, 0x0221101f}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC,
                {0x14, 0x90170110},
                {0x1b, 0x90a70130},
@@ -7320,6 +7396,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x14, 0x90170110},
                {0x1b, 0x90a70130},
                {0x21, 0x03211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+               {0x12, 0x90a60130},
+               {0x14, 0x90170110},
+               {0x21, 0x03211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+               {0x12, 0x90a60130},
+               {0x14, 0x90170110},
+               {0x21, 0x04211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+               {0x1a, 0x90a70130},
+               {0x1b, 0x90170110},
+               {0x21, 0x03211020}),
        SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
                {0x12, 0xb7a60130},
                {0x13, 0xb8a61140},
@@ -7459,6 +7547,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x12, 0x90a60130},
                {0x17, 0x90170110},
                {0x21, 0x04211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0295, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
+               {0x12, 0x90a60130},
+               {0x17, 0x90170110},
+               {0x21, 0x03211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x14, 0x90170110},
+               {0x21, 0x04211020}),
        SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC295_STANDARD_PINS,
                {0x17, 0x21014020},
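
Two mechanical themes run through the patch_realtek.c changes above: the
System76 oryp5 fixups are renamed to the underlying Clevo PB51ED board so a
Tuxedo machine can share them, and the chromebook fixup is split so the
generic headset-jack setup lives in alc_fixup_headset_jack() (exposed as
"alc-headset-jack") while alc295_fixup_chromebook() keeps only the HP
jack-detect reset and chains to it. Chaining is the fixup table's composition
mechanism; a hypothetical entry showing the idiom:

    [MY_BOARD_FIXUP] = {                    /* hypothetical IDs */
            .type = HDA_FIXUP_FUNC,
            .v.func = my_board_fixup,
            .chained = true,                /* after this fixup runs... */
            .chain_id = MY_GENERIC_FIXUP,   /* ...apply this entry too */
    },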
index 419114edfd57db8f341184148fcf4d7ab7d67a0a..667fc1d59e189f599e580654c10719b96b7e74a0 100644 (file)
@@ -1151,6 +1151,7 @@ config SND_SOC_WCD9335
        tristate "WCD9335 Codec"
        depends on SLIMBUS
        select REGMAP_SLIMBUS
+       select REGMAP_IRQ
        help
          The WCD9335 is a standalone Hi-Fi audio CODEC IC, supports
          Qualcomm Technologies, Inc. (QTI) multimedia solutions,
index 03bbbcd3b6c115254a75f367c40447f012edc8c4..87616b126018b3cb9b9a5c8f4ca7a3af51b97264 100644 (file)
@@ -2129,6 +2129,7 @@ static int ab8500_codec_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
                dev_err(dai->component->dev,
                        "%s: ERROR: The device is either a master or a slave.\n",
                        __func__);
+               /* fall through */
        default:
                dev_err(dai->component->dev,
                        "%s: ERROR: Unsupporter master mask 0x%x\n",
index 9f4a59871cee72b2011acf20d8167e12933ec2fc..c71696146c5ec17e17751903a592038c4f8347e4 100644 (file)
@@ -1635,6 +1635,16 @@ err:
        return ret;
 }
 
+static int cs35l35_i2c_remove(struct i2c_client *i2c_client)
+{
+       struct cs35l35_private *cs35l35 = i2c_get_clientdata(i2c_client);
+
+       regulator_bulk_disable(cs35l35->num_supplies, cs35l35->supplies);
+       gpiod_set_value_cansleep(cs35l35->reset_gpio, 0);
+
+       return 0;
+}
+
 static const struct of_device_id cs35l35_of_match[] = {
        {.compatible = "cirrus,cs35l35"},
        {},
@@ -1655,6 +1665,7 @@ static struct i2c_driver cs35l35_i2c_driver = {
        },
        .id_table = cs35l35_id,
        .probe = cs35l35_i2c_probe,
+       .remove = cs35l35_i2c_remove,
 };
 
 module_i2c_driver(cs35l35_i2c_driver);
index 33d74f163bd753820bb77bd81d8f0a28461410e5..793a14d586672bc2b76b143b5756206becce28a8 100644 (file)
@@ -642,6 +642,7 @@ static const struct regmap_config cs4270_regmap = {
        .reg_defaults =         cs4270_reg_defaults,
        .num_reg_defaults =     ARRAY_SIZE(cs4270_reg_defaults),
        .cache_type =           REGCACHE_RBTREE,
+       .write_flag_mask =      CS4270_I2C_INCR,
 
        .readable_reg =         cs4270_reg_is_readable,
        .volatile_reg =         cs4270_reg_is_volatile,
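
regmap ORs write_flag_mask into the register address on every write; for the
CS4270 that sets the auto-increment bit (CS4270_I2C_INCR) so multi-register
writes, such as a register cache sync, address consecutive registers
correctly. In general terms (mask value assumed for illustration):

    static const struct regmap_config cfg = {
            .reg_bits = 8,
            .val_bits = 8,
            /* e.g. bit 7 of the address enables auto-increment */
            .write_flag_mask = 0x80,
    };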
index ffecdaaa8cf2bb2e69bde2de7e2acb92cbdec0a1..f889d94c8e3cf707f0bcab6cdb7f860851d7042f 100644 (file)
@@ -38,6 +38,9 @@ static void hdac_hda_dai_close(struct snd_pcm_substream *substream,
                               struct snd_soc_dai *dai);
 static int hdac_hda_dai_prepare(struct snd_pcm_substream *substream,
                                struct snd_soc_dai *dai);
+static int hdac_hda_dai_hw_params(struct snd_pcm_substream *substream,
+                                 struct snd_pcm_hw_params *params,
+                                 struct snd_soc_dai *dai);
 static int hdac_hda_dai_hw_free(struct snd_pcm_substream *substream,
                                struct snd_soc_dai *dai);
 static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai,
@@ -50,6 +53,7 @@ static const struct snd_soc_dai_ops hdac_hda_dai_ops = {
        .startup = hdac_hda_dai_open,
        .shutdown = hdac_hda_dai_close,
        .prepare = hdac_hda_dai_prepare,
+       .hw_params = hdac_hda_dai_hw_params,
        .hw_free = hdac_hda_dai_hw_free,
        .set_tdm_slot = hdac_hda_dai_set_tdm_slot,
 };
@@ -139,6 +143,39 @@ static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai,
        return 0;
 }
 
+static int hdac_hda_dai_hw_params(struct snd_pcm_substream *substream,
+                                 struct snd_pcm_hw_params *params,
+                                 struct snd_soc_dai *dai)
+{
+       struct snd_soc_component *component = dai->component;
+       struct hdac_hda_priv *hda_pvt;
+       unsigned int format_val;
+       unsigned int maxbps;
+
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+               maxbps = dai->driver->playback.sig_bits;
+       else
+               maxbps = dai->driver->capture.sig_bits;
+
+       hda_pvt = snd_soc_component_get_drvdata(component);
+       format_val = snd_hdac_calc_stream_format(params_rate(params),
+                                                params_channels(params),
+                                                params_format(params),
+                                                maxbps,
+                                                0);
+       if (!format_val) {
+               dev_err(dai->dev,
+                       "invalid format_val, rate=%d, ch=%d, format=%d, maxbps=%d\n",
+                       params_rate(params), params_channels(params),
+                       params_format(params), maxbps);
+
+               return -EINVAL;
+       }
+
+       hda_pvt->pcm[dai->id].format_val[substream->stream] = format_val;
+       return 0;
+}
+
 static int hdac_hda_dai_hw_free(struct snd_pcm_substream *substream,
                                struct snd_soc_dai *dai)
 {
@@ -162,10 +199,9 @@ static int hdac_hda_dai_prepare(struct snd_pcm_substream *substream,
                                struct snd_soc_dai *dai)
 {
        struct snd_soc_component *component = dai->component;
+       struct hda_pcm_stream *hda_stream;
        struct hdac_hda_priv *hda_pvt;
-       struct snd_pcm_runtime *runtime = substream->runtime;
        struct hdac_device *hdev;
-       struct hda_pcm_stream *hda_stream;
        unsigned int format_val;
        struct hda_pcm *pcm;
        unsigned int stream;
@@ -179,19 +215,8 @@ static int hdac_hda_dai_prepare(struct snd_pcm_substream *substream,
 
        hda_stream = &pcm->stream[substream->stream];
 
-       format_val = snd_hdac_calc_stream_format(runtime->rate,
-                                                runtime->channels,
-                                                runtime->format,
-                                                hda_stream->maxbps,
-                                                0);
-       if (!format_val) {
-               dev_err(&hdev->dev,
-                       "invalid format_val, rate=%d, ch=%d, format=%d\n",
-                       runtime->rate, runtime->channels, runtime->format);
-               return -EINVAL;
-       }
-
        stream = hda_pvt->pcm[dai->id].stream_tag[substream->stream];
+       format_val = hda_pvt->pcm[dai->id].format_val[substream->stream];
 
        ret = snd_hda_codec_prepare(&hda_pvt->codec, hda_stream,
                                    stream, format_val, substream);
index e444ef5933606ce8689f06ba187f3e84491d6336..6b1bd4f428e70ed4037adef636ef9c42878d2865 100644 (file)
@@ -8,6 +8,7 @@
 
 struct hdac_hda_pcm {
        int stream_tag[2];
+       unsigned int format_val[2];
 };
 
 struct hdac_hda_priv {
index e5b6769b9797724ceef38f3b8132200876f227e8..35df73e42cbc5f9d7bf6af4aa954bde49e8565e2 100644 (file)
@@ -484,9 +484,6 @@ static int hdmi_codec_hw_params(struct snd_pcm_substream *substream,
                params_width(params), params_rate(params),
                params_channels(params));
 
-       if (params_width(params) > 24)
-               params->msbits = 24;
-
        ret = snd_pcm_create_iec958_consumer_hw_params(params, hp.iec.status,
                                                       sizeof(hp.iec.status));
        if (ret < 0) {
@@ -529,73 +526,71 @@ static int hdmi_codec_set_fmt(struct snd_soc_dai *dai,
 {
        struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
        struct hdmi_codec_daifmt cf = { 0 };
-       int ret = 0;
 
        dev_dbg(dai->dev, "%s()\n", __func__);
 
-       if (dai->id == DAI_ID_SPDIF) {
-               cf.fmt = HDMI_SPDIF;
-       } else {
-               switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
-               case SND_SOC_DAIFMT_CBM_CFM:
-                       cf.bit_clk_master = 1;
-                       cf.frame_clk_master = 1;
-                       break;
-               case SND_SOC_DAIFMT_CBS_CFM:
-                       cf.frame_clk_master = 1;
-                       break;
-               case SND_SOC_DAIFMT_CBM_CFS:
-                       cf.bit_clk_master = 1;
-                       break;
-               case SND_SOC_DAIFMT_CBS_CFS:
-                       break;
-               default:
-                       return -EINVAL;
-               }
+       if (dai->id == DAI_ID_SPDIF)
+               return 0;
+
+       switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+       case SND_SOC_DAIFMT_CBM_CFM:
+               cf.bit_clk_master = 1;
+               cf.frame_clk_master = 1;
+               break;
+       case SND_SOC_DAIFMT_CBS_CFM:
+               cf.frame_clk_master = 1;
+               break;
+       case SND_SOC_DAIFMT_CBM_CFS:
+               cf.bit_clk_master = 1;
+               break;
+       case SND_SOC_DAIFMT_CBS_CFS:
+               break;
+       default:
+               return -EINVAL;
+       }
 
-               switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
-               case SND_SOC_DAIFMT_NB_NF:
-                       break;
-               case SND_SOC_DAIFMT_NB_IF:
-                       cf.frame_clk_inv = 1;
-                       break;
-               case SND_SOC_DAIFMT_IB_NF:
-                       cf.bit_clk_inv = 1;
-                       break;
-               case SND_SOC_DAIFMT_IB_IF:
-                       cf.frame_clk_inv = 1;
-                       cf.bit_clk_inv = 1;
-                       break;
-               }
+       switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+       case SND_SOC_DAIFMT_NB_NF:
+               break;
+       case SND_SOC_DAIFMT_NB_IF:
+               cf.frame_clk_inv = 1;
+               break;
+       case SND_SOC_DAIFMT_IB_NF:
+               cf.bit_clk_inv = 1;
+               break;
+       case SND_SOC_DAIFMT_IB_IF:
+               cf.frame_clk_inv = 1;
+               cf.bit_clk_inv = 1;
+               break;
+       }
 
-               switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
-               case SND_SOC_DAIFMT_I2S:
-                       cf.fmt = HDMI_I2S;
-                       break;
-               case SND_SOC_DAIFMT_DSP_A:
-                       cf.fmt = HDMI_DSP_A;
-                       break;
-               case SND_SOC_DAIFMT_DSP_B:
-                       cf.fmt = HDMI_DSP_B;
-                       break;
-               case SND_SOC_DAIFMT_RIGHT_J:
-                       cf.fmt = HDMI_RIGHT_J;
-                       break;
-               case SND_SOC_DAIFMT_LEFT_J:
-                       cf.fmt = HDMI_LEFT_J;
-                       break;
-               case SND_SOC_DAIFMT_AC97:
-                       cf.fmt = HDMI_AC97;
-                       break;
-               default:
-                       dev_err(dai->dev, "Invalid DAI interface format\n");
-                       return -EINVAL;
-               }
+       switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+       case SND_SOC_DAIFMT_I2S:
+               cf.fmt = HDMI_I2S;
+               break;
+       case SND_SOC_DAIFMT_DSP_A:
+               cf.fmt = HDMI_DSP_A;
+               break;
+       case SND_SOC_DAIFMT_DSP_B:
+               cf.fmt = HDMI_DSP_B;
+               break;
+       case SND_SOC_DAIFMT_RIGHT_J:
+               cf.fmt = HDMI_RIGHT_J;
+               break;
+       case SND_SOC_DAIFMT_LEFT_J:
+               cf.fmt = HDMI_LEFT_J;
+               break;
+       case SND_SOC_DAIFMT_AC97:
+               cf.fmt = HDMI_AC97;
+               break;
+       default:
+               dev_err(dai->dev, "Invalid DAI interface format\n");
+               return -EINVAL;
        }
 
        hcp->daifmt[dai->id] = cf;
 
-       return ret;
+       return 0;
 }
 
 static int hdmi_codec_digital_mute(struct snd_soc_dai *dai, int mute)
@@ -792,8 +787,10 @@ static int hdmi_codec_probe(struct platform_device *pdev)
                i++;
        }
 
-       if (hcd->spdif)
+       if (hcd->spdif) {
                hcp->daidrv[i] = hdmi_spdif_dai;
+               hcp->daifmt[DAI_ID_SPDIF].fmt = HDMI_SPDIF;
+       }
 
        dev_set_drvdata(dev, hcp);
 
index bfd74b86c9d2f43b8e19d8bd4c211c6d2f3cd887..645aa07941237d13cbebdf0d4bf17130f9dae1ae 100644 (file)
@@ -411,9 +411,9 @@ static const struct snd_soc_dapm_widget nau8810_dapm_widgets[] = {
        SND_SOC_DAPM_MIXER("Mono Mixer", NAU8810_REG_POWER3,
                NAU8810_MOUTMX_EN_SFT, 0, &nau8810_mono_mixer_controls[0],
                ARRAY_SIZE(nau8810_mono_mixer_controls)),
-       SND_SOC_DAPM_DAC("DAC", "HiFi Playback", NAU8810_REG_POWER3,
+       SND_SOC_DAPM_DAC("DAC", "Playback", NAU8810_REG_POWER3,
                NAU8810_DAC_EN_SFT, 0),
-       SND_SOC_DAPM_ADC("ADC", "HiFi Capture", NAU8810_REG_POWER2,
+       SND_SOC_DAPM_ADC("ADC", "Capture", NAU8810_REG_POWER2,
                NAU8810_ADC_EN_SFT, 0),
        SND_SOC_DAPM_PGA("SpkN Out", NAU8810_REG_POWER3,
                NAU8810_NSPK_EN_SFT, 0, NULL, 0),
index 87ed3dc496dc2de72043c8dbd5c66cbed7c0f923..5ab05e75edeac61219945488a455eafdc8f785f1 100644 (file)
@@ -681,8 +681,8 @@ static const struct snd_soc_dapm_widget nau8824_dapm_widgets[] = {
        SND_SOC_DAPM_ADC("ADCR", NULL, NAU8824_REG_ANALOG_ADC_2,
                NAU8824_ADCR_EN_SFT, 0),
 
-       SND_SOC_DAPM_AIF_OUT("AIFTX", "HiFi Capture", 0, SND_SOC_NOPM, 0, 0),
-       SND_SOC_DAPM_AIF_IN("AIFRX", "HiFi Playback", 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_OUT("AIFTX", "Capture", 0, SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_AIF_IN("AIFRX", "Playback", 0, SND_SOC_NOPM, 0, 0),
 
        SND_SOC_DAPM_DAC("DACL", NULL, NAU8824_REG_RDAC,
                NAU8824_DACL_EN_SFT, 0),
@@ -831,6 +831,36 @@ static void nau8824_int_status_clear_all(struct regmap *regmap)
        }
 }
 
+static void nau8824_dapm_disable_pin(struct nau8824 *nau8824, const char *pin)
+{
+       struct snd_soc_dapm_context *dapm = nau8824->dapm;
+       const char *prefix = dapm->component->name_prefix;
+       char prefixed_pin[80];
+
+       if (prefix) {
+               snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
+                        prefix, pin);
+               snd_soc_dapm_disable_pin(dapm, prefixed_pin);
+       } else {
+               snd_soc_dapm_disable_pin(dapm, pin);
+       }
+}
+
+static void nau8824_dapm_enable_pin(struct nau8824 *nau8824, const char *pin)
+{
+       struct snd_soc_dapm_context *dapm = nau8824->dapm;
+       const char *prefix = dapm->component->name_prefix;
+       char prefixed_pin[80];
+
+       if (prefix) {
+               snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
+                        prefix, pin);
+               snd_soc_dapm_force_enable_pin(dapm, prefixed_pin);
+       } else {
+               snd_soc_dapm_force_enable_pin(dapm, pin);
+       }
+}
+
 static void nau8824_eject_jack(struct nau8824 *nau8824)
 {
        struct snd_soc_dapm_context *dapm = nau8824->dapm;
@@ -839,8 +869,8 @@ static void nau8824_eject_jack(struct nau8824 *nau8824)
        /* Clear all interruption status */
        nau8824_int_status_clear_all(regmap);
 
-       snd_soc_dapm_disable_pin(dapm, "SAR");
-       snd_soc_dapm_disable_pin(dapm, "MICBIAS");
+       nau8824_dapm_disable_pin(nau8824, "SAR");
+       nau8824_dapm_disable_pin(nau8824, "MICBIAS");
        snd_soc_dapm_sync(dapm);
 
        /* Enable the insertion interruption, disable the ejection
@@ -870,8 +900,8 @@ static void nau8824_jdet_work(struct work_struct *work)
        struct regmap *regmap = nau8824->regmap;
        int adc_value, event = 0, event_mask = 0;
 
-       snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
-       snd_soc_dapm_force_enable_pin(dapm, "SAR");
+       nau8824_dapm_enable_pin(nau8824, "MICBIAS");
+       nau8824_dapm_enable_pin(nau8824, "SAR");
        snd_soc_dapm_sync(dapm);
 
        msleep(100);
@@ -882,8 +912,8 @@ static void nau8824_jdet_work(struct work_struct *work)
        if (adc_value < HEADSET_SARADC_THD) {
                event |= SND_JACK_HEADPHONE;
 
-               snd_soc_dapm_disable_pin(dapm, "SAR");
-               snd_soc_dapm_disable_pin(dapm, "MICBIAS");
+               nau8824_dapm_disable_pin(nau8824, "SAR");
+               nau8824_dapm_disable_pin(nau8824, "MICBIAS");
                snd_soc_dapm_sync(dapm);
        } else {
                event |= SND_JACK_HEADSET;
index 9d5acd2d04abd47281b26d8f81331182c0288fb8..86a7fa31c294b2d3fb00494dd3c934976c2a2044 100644 (file)
@@ -910,13 +910,21 @@ static int rt5682_headset_detect(struct snd_soc_component *component,
                int jack_insert)
 {
        struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
-       struct snd_soc_dapm_context *dapm =
-               snd_soc_component_get_dapm(component);
        unsigned int val, count;
 
        if (jack_insert) {
-               snd_soc_dapm_force_enable_pin(dapm, "CBJ Power");
-               snd_soc_dapm_sync(dapm);
+
+               snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
+                       RT5682_PWR_VREF2 | RT5682_PWR_MB,
+                       RT5682_PWR_VREF2 | RT5682_PWR_MB);
+               snd_soc_component_update_bits(component,
+                               RT5682_PWR_ANLG_1, RT5682_PWR_FV2, 0);
+               usleep_range(15000, 20000);
+               snd_soc_component_update_bits(component,
+                               RT5682_PWR_ANLG_1, RT5682_PWR_FV2, RT5682_PWR_FV2);
+               snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3,
+                       RT5682_PWR_CBJ, RT5682_PWR_CBJ);
+
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
                        RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_HIGH);
 
@@ -944,8 +952,10 @@ static int rt5682_headset_detect(struct snd_soc_component *component,
                rt5682_enable_push_button_irq(component, false);
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
                        RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_LOW);
-               snd_soc_dapm_disable_pin(dapm, "CBJ Power");
-               snd_soc_dapm_sync(dapm);
+               snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
+                       RT5682_PWR_VREF2 | RT5682_PWR_MB, 0);
+               snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3,
+                       RT5682_PWR_CBJ, 0);
 
                rt5682->jack_type = 0;
        }
@@ -1198,7 +1208,7 @@ static int set_filter_clk(struct snd_soc_dapm_widget *w,
        struct snd_soc_component *component =
                snd_soc_dapm_to_component(w->dapm);
        struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
-       int ref, val, reg, sft, mask, idx = -EINVAL;
+       int ref, val, reg, idx = -EINVAL;
        static const int div_f[] = {1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48};
        static const int div_o[] = {1, 2, 4, 6, 8, 12, 16, 24, 32, 48};
 
@@ -1212,15 +1222,10 @@ static int set_filter_clk(struct snd_soc_dapm_widget *w,
 
        idx = rt5682_div_sel(rt5682, ref, div_f, ARRAY_SIZE(div_f));
 
-       if (w->shift == RT5682_PWR_ADC_S1F_BIT) {
+       if (w->shift == RT5682_PWR_ADC_S1F_BIT)
                reg = RT5682_PLL_TRACK_3;
-               sft = RT5682_ADC_OSR_SFT;
-               mask = RT5682_ADC_OSR_MASK;
-       } else {
+       else
                reg = RT5682_PLL_TRACK_2;
-               sft = RT5682_DAC_OSR_SFT;
-               mask = RT5682_DAC_OSR_MASK;
-       }
 
        snd_soc_component_update_bits(component, reg,
                RT5682_FILTER_CLK_DIV_MASK, idx << RT5682_FILTER_CLK_DIV_SFT);
@@ -1232,7 +1237,8 @@ static int set_filter_clk(struct snd_soc_dapm_widget *w,
        }
 
        snd_soc_component_update_bits(component, RT5682_ADDA_CLK_1,
-               mask, idx << sft);
+               RT5682_ADC_OSR_MASK | RT5682_DAC_OSR_MASK,
+               (idx << RT5682_ADC_OSR_SFT) | (idx << RT5682_DAC_OSR_SFT));
 
        return 0;
 }
@@ -1591,8 +1597,6 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = {
                0, NULL, 0),
        SND_SOC_DAPM_SUPPLY("Vref1", RT5682_PWR_ANLG_1, RT5682_PWR_VREF1_BIT, 0,
                rt5655_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
-       SND_SOC_DAPM_SUPPLY("Vref2", RT5682_PWR_ANLG_1, RT5682_PWR_VREF2_BIT, 0,
-               rt5655_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
 
        /* ASRC */
        SND_SOC_DAPM_SUPPLY_S("DAC STO1 ASRC", 1, RT5682_PLL_TRACK_1,
@@ -1627,9 +1631,6 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = {
        SND_SOC_DAPM_PGA("BST1 CBJ", SND_SOC_NOPM,
                0, 0, NULL, 0),
 
-       SND_SOC_DAPM_SUPPLY("CBJ Power", RT5682_PWR_ANLG_3,
-               RT5682_PWR_CBJ_BIT, 0, NULL, 0),
-
        /* REC Mixer */
        SND_SOC_DAPM_MIXER("RECMIX1L", SND_SOC_NOPM, 0, 0, rt5682_rec1_l_mix,
                ARRAY_SIZE(rt5682_rec1_l_mix)),
@@ -1792,17 +1793,13 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
 
        /*Vref*/
        {"MICBIAS1", NULL, "Vref1"},
-       {"MICBIAS1", NULL, "Vref2"},
        {"MICBIAS2", NULL, "Vref1"},
-       {"MICBIAS2", NULL, "Vref2"},
 
        {"CLKDET SYS", NULL, "CLKDET"},
 
        {"IN1P", NULL, "LDO2"},
 
        {"BST1 CBJ", NULL, "IN1P"},
-       {"BST1 CBJ", NULL, "CBJ Power"},
-       {"CBJ Power", NULL, "Vref2"},
 
        {"RECMIX1L", "CBJ Switch", "BST1 CBJ"},
        {"RECMIX1L", NULL, "RECMIX1L Power"},
@@ -1912,9 +1909,7 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
        {"HP Amp", NULL, "Capless"},
        {"HP Amp", NULL, "Charge Pump"},
        {"HP Amp", NULL, "CLKDET SYS"},
-       {"HP Amp", NULL, "CBJ Power"},
        {"HP Amp", NULL, "Vref1"},
-       {"HP Amp", NULL, "Vref2"},
        {"HPOL Playback", "Switch", "HP Amp"},
        {"HPOR Playback", "Switch", "HP Amp"},
        {"HPOL", NULL, "HPOL Playback"},
@@ -2303,16 +2298,13 @@ static int rt5682_set_bias_level(struct snd_soc_component *component,
        switch (level) {
        case SND_SOC_BIAS_PREPARE:
                regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
-                       RT5682_PWR_MB | RT5682_PWR_BG,
-                       RT5682_PWR_MB | RT5682_PWR_BG);
+                       RT5682_PWR_BG, RT5682_PWR_BG);
                regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1,
                        RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO,
                        RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO);
                break;
 
        case SND_SOC_BIAS_STANDBY:
-               regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
-                       RT5682_PWR_MB, RT5682_PWR_MB);
                regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1,
                        RT5682_DIG_GATE_CTRL, RT5682_DIG_GATE_CTRL);
                break;
@@ -2320,7 +2312,7 @@ static int rt5682_set_bias_level(struct snd_soc_component *component,
                regmap_update_bits(rt5682->regmap, RT5682_PWR_DIG_1,
                        RT5682_DIG_GATE_CTRL | RT5682_PWR_LDO, 0);
                regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
-                       RT5682_PWR_MB | RT5682_PWR_BG, 0);
+                       RT5682_PWR_BG, 0);
                break;
 
        default:
@@ -2363,6 +2355,8 @@ static int rt5682_resume(struct snd_soc_component *component)
        regcache_cache_only(rt5682->regmap, false);
        regcache_sync(rt5682->regmap);
 
+       rt5682_irq(0, rt5682);
+
        return 0;
 }
 #else
index 385fa2e9525abe2c89fc624baef239b2541b7328..22c3a6bc0b6c47ae90de8fc435127b089fb80517 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright 2011 NW Digital Radio
  *
- * Author: Jeremy McDermond <nh6z@nh6z.net>
+ * Author: Annaliese McDermond <nh6z@nh6z.net>
  *
  * Based on sound/soc/codecs/wm8974 and TI driver for kernel 2.6.27.
  *
@@ -72,5 +72,5 @@ static struct i2c_driver aic32x4_i2c_driver = {
 module_i2c_driver(aic32x4_i2c_driver);
 
 MODULE_DESCRIPTION("ASoC TLV320AIC32x4 codec driver I2C");
-MODULE_AUTHOR("Jeremy McDermond <nh6z@nh6z.net>");
+MODULE_AUTHOR("Annaliese McDermond <nh6z@nh6z.net>");
 MODULE_LICENSE("GPL");
index 07d78ae51e05c77bbd0a93410d0df2449be5d759..aa5b7ba0254bc6b7e009ce2adb6099d2d138819c 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright 2011 NW Digital Radio
  *
- * Author: Jeremy McDermond <nh6z@nh6z.net>
+ * Author: Annaliese McDermond <nh6z@nh6z.net>
  *
  * Based on sound/soc/codecs/wm8974 and TI driver for kernel 2.6.27.
  *
@@ -74,5 +74,5 @@ static struct spi_driver aic32x4_spi_driver = {
 module_spi_driver(aic32x4_spi_driver);
 
 MODULE_DESCRIPTION("ASoC TLV320AIC32x4 codec driver SPI");
-MODULE_AUTHOR("Jeremy McDermond <nh6z@nh6z.net>");
+MODULE_AUTHOR("Annaliese McDermond <nh6z@nh6z.net>");
 MODULE_LICENSE("GPL");
index 96f1526cb258a4e718afb86712aece3608c18003..5520044929f42ff6c51d18fb8338602127c79b91 100644 (file)
@@ -490,6 +490,8 @@ static const struct snd_soc_dapm_widget aic32x4_dapm_widgets[] = {
        SND_SOC_DAPM_INPUT("IN2_R"),
        SND_SOC_DAPM_INPUT("IN3_L"),
        SND_SOC_DAPM_INPUT("IN3_R"),
+       SND_SOC_DAPM_INPUT("CM_L"),
+       SND_SOC_DAPM_INPUT("CM_R"),
 };
 
 static const struct snd_soc_dapm_route aic32x4_dapm_routes[] = {
index 283583d1db60555f0831229be3b2f0e1a65bafee..516d17cb2182287f8f739a072fe3128687cecd93 100644 (file)
@@ -1609,7 +1609,6 @@ static int aic3x_probe(struct snd_soc_component *component)
        struct aic3x_priv *aic3x = snd_soc_component_get_drvdata(component);
        int ret, i;
 
-       INIT_LIST_HEAD(&aic3x->list);
        aic3x->component = component;
 
        for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++) {
@@ -1873,6 +1872,7 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
        if (ret != 0)
                goto err_gpio;
 
+       INIT_LIST_HEAD(&aic3x->list);
        list_add(&aic3x->list, &reset_list);
 
        return 0;
@@ -1889,6 +1889,8 @@ static int aic3x_i2c_remove(struct i2c_client *client)
 {
        struct aic3x_priv *aic3x = i2c_get_clientdata(client);
 
+       list_del(&aic3x->list);
+
        if (gpio_is_valid(aic3x->gpio_reset) &&
            !aic3x_is_shared_reset(aic3x)) {
                gpio_set_value(aic3x->gpio_reset, 0);
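
The aic3x change balances the list bookkeeping across bind and unbind: the
entry is initialized and added in i2c probe, and list_del() in i2c remove
unlinks it before the private data is freed, so reset_list can no longer
hold a stale pointer after unbind. The invariant in miniature:

    /* every list_add() at probe time... */
    list_add(&priv->list, &reset_list);

    /* ...is balanced by a list_del() at remove time */
    list_del(&priv->list);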
index b93fdc8d2d6fb76112eabc4fe2a6a302fa86b710..b0b48eb9c7c91578cb5d588955a0afa74f0ff262 100644 (file)
@@ -2905,6 +2905,8 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
                if (wm_adsp_fw[dsp->fw].num_caps != 0)
                        wm_adsp_buffer_free(dsp);
 
+               dsp->fatal_error = false;
+
                mutex_unlock(&dsp->pwr_lock);
 
                adsp_dbg(dsp, "Execution stopped\n");
@@ -3000,6 +3002,9 @@ static int wm_adsp_compr_attach(struct wm_adsp_compr *compr)
 {
        struct wm_adsp_compr_buf *buf = NULL, *tmp;
 
+       if (compr->dsp->fatal_error)
+               return -EINVAL;
+
        list_for_each_entry(tmp, &compr->dsp->buffer_list, list) {
                if (!tmp->name || !strcmp(compr->name, tmp->name)) {
                        buf = tmp;
@@ -3535,11 +3540,11 @@ static int wm_adsp_buffer_get_error(struct wm_adsp_compr_buf *buf)
 
        ret = wm_adsp_buffer_read(buf, HOST_BUFFER_FIELD(error), &buf->error);
        if (ret < 0) {
-               adsp_err(buf->dsp, "Failed to check buffer error: %d\n", ret);
+               compr_err(buf, "Failed to check buffer error: %d\n", ret);
                return ret;
        }
        if (buf->error != 0) {
-               adsp_err(buf->dsp, "Buffer error occurred: %d\n", buf->error);
+               compr_err(buf, "Buffer error occurred: %d\n", buf->error);
                return -EIO;
        }
 
@@ -3571,8 +3576,6 @@ int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd)
                if (ret < 0)
                        break;
 
-               wm_adsp_buffer_clear(compr->buf);
-
                /* Trigger the IRQ at one fragment of data */
                ret = wm_adsp_buffer_write(compr->buf,
                                           HOST_BUFFER_FIELD(high_water_mark),
@@ -3584,6 +3587,8 @@ int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd)
                }
                break;
        case SNDRV_PCM_TRIGGER_STOP:
+               if (wm_adsp_compr_attached(compr))
+                       wm_adsp_buffer_clear(compr->buf);
                break;
        default:
                ret = -EINVAL;
@@ -3917,22 +3922,40 @@ int wm_adsp2_lock(struct wm_adsp *dsp, unsigned int lock_regions)
 }
 EXPORT_SYMBOL_GPL(wm_adsp2_lock);
 
+static void wm_adsp_fatal_error(struct wm_adsp *dsp)
+{
+       struct wm_adsp_compr *compr;
+
+       dsp->fatal_error = true;
+
+       list_for_each_entry(compr, &dsp->compr_list, list) {
+               if (compr->stream) {
+                       snd_compr_stop_error(compr->stream,
+                                            SNDRV_PCM_STATE_XRUN);
+                       snd_compr_fragment_elapsed(compr->stream);
+               }
+       }
+}
+
 irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
 {
        unsigned int val;
        struct regmap *regmap = dsp->regmap;
        int ret = 0;
 
+       mutex_lock(&dsp->pwr_lock);
+
        ret = regmap_read(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL, &val);
        if (ret) {
                adsp_err(dsp,
                        "Failed to read Region Lock Ctrl register: %d\n", ret);
-               return IRQ_HANDLED;
+               goto error;
        }
 
        if (val & ADSP2_WDT_TIMEOUT_STS_MASK) {
                adsp_err(dsp, "watchdog timeout error\n");
                wm_adsp_stop_watchdog(dsp);
+               wm_adsp_fatal_error(dsp);
        }
 
        if (val & (ADSP2_SLAVE_ERR_MASK | ADSP2_REGION_LOCK_ERR_MASK)) {
@@ -3946,7 +3969,7 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
                        adsp_err(dsp,
                                 "Failed to read Bus Err Addr register: %d\n",
                                 ret);
-                       return IRQ_HANDLED;
+                       goto error;
                }
 
                adsp_err(dsp, "bus error address = 0x%x\n",
@@ -3959,7 +3982,7 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
                        adsp_err(dsp,
                                 "Failed to read Pmem Xmem Err Addr register: %d\n",
                                 ret);
-                       return IRQ_HANDLED;
+                       goto error;
                }
 
                adsp_err(dsp, "xmem error address = 0x%x\n",
@@ -3972,6 +3995,9 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
        regmap_update_bits(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL,
                           ADSP2_CTRL_ERR_EINT, ADSP2_CTRL_ERR_EINT);
 
+error:
+       mutex_unlock(&dsp->pwr_lock);
+
        return IRQ_HANDLED;
 }
 EXPORT_SYMBOL_GPL(wm_adsp2_bus_error);
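
The wm_adsp change above takes dsp->pwr_lock for the whole bus-error IRQ thread and funnels every early return through a single unlock label. A minimal standalone sketch of that shape, assuming hypothetical names (struct foo, FOO_STATUS, FOO_FATAL_MASK, foo_handle_fatal), not the driver's own:

static irqreturn_t foo_bus_error(struct foo *dev)
{
        unsigned int val;
        int ret;

        mutex_lock(&dev->lock);

        ret = regmap_read(dev->regmap, FOO_STATUS, &val);
        if (ret)
                goto error;             /* early exits still unlock */

        if (val & FOO_FATAL_MASK)
                foo_handle_fatal(dev);  /* runs with the lock held */

error:
        mutex_unlock(&dev->lock);
        return IRQ_HANDLED;
}

Holding the lock across the handler is what lets the new wm_adsp_fatal_error() walk compr_list safely from IRQ-thread context.
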
index 59e07ad163296c3ff21f238ccdfca96b6af43c76..8f09b4419a914ae773558c6175529b1acebf9a82 100644 (file)
@@ -85,6 +85,7 @@ struct wm_adsp {
        bool preloaded;
        bool booted;
        bool running;
+       bool fatal_error;
 
        struct list_head ctl_list;
 
index 528e8b108422971eea52655b55642ac5cdbe575d..0b937924d2e47961d697d068edf3944772058baa 100644 (file)
@@ -445,6 +445,19 @@ struct dma_chan *fsl_asrc_get_dma_channel(struct fsl_asrc_pair *pair, bool dir)
 }
 EXPORT_SYMBOL_GPL(fsl_asrc_get_dma_channel);
 
+static int fsl_asrc_dai_startup(struct snd_pcm_substream *substream,
+                               struct snd_soc_dai *dai)
+{
+       struct fsl_asrc *asrc_priv = snd_soc_dai_get_drvdata(dai);
+
+       /* An odd channel number is not valid for older ASRC (channel_bits==3) */
+       if (asrc_priv->channel_bits == 3)
+               snd_pcm_hw_constraint_step(substream->runtime, 0,
+                                          SNDRV_PCM_HW_PARAM_CHANNELS, 2);
+
+       return 0;
+}
+
 static int fsl_asrc_dai_hw_params(struct snd_pcm_substream *substream,
                                  struct snd_pcm_hw_params *params,
                                  struct snd_soc_dai *dai)
@@ -539,6 +552,7 @@ static int fsl_asrc_dai_trigger(struct snd_pcm_substream *substream, int cmd,
 }
 
 static const struct snd_soc_dai_ops fsl_asrc_dai_ops = {
+       .startup      = fsl_asrc_dai_startup,
        .hw_params    = fsl_asrc_dai_hw_params,
        .hw_free      = fsl_asrc_dai_hw_free,
        .trigger      = fsl_asrc_dai_trigger,
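
snd_pcm_hw_constraint_step() takes (runtime, condition bitmap, parameter, step). The same even-channels restriction in a hypothetical driver, for reference (bar_dai_startup is illustrative, not from the patch):

static int bar_dai_startup(struct snd_pcm_substream *substream,
                           struct snd_soc_dai *dai)
{
        /* cond 0 = always apply; channel count negotiates in steps of 2 */
        return snd_pcm_hw_constraint_step(substream->runtime, 0,
                                          SNDRV_PCM_HW_PARAM_CHANNELS, 2);
}
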
index afe67c865330e39c7b3d1b30bd6127dd764f42aa..3623aa9a6f2ea7838e2c855a5d88681436ac11c1 100644 (file)
@@ -54,6 +54,8 @@ struct fsl_esai {
        u32 fifo_depth;
        u32 slot_width;
        u32 slots;
+       u32 tx_mask;
+       u32 rx_mask;
        u32 hck_rate[2];
        u32 sck_rate[2];
        bool hck_dir[2];
@@ -361,21 +363,13 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
        regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
                           ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
 
-       regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA,
-                          ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask));
-       regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB,
-                          ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask));
-
        regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
                           ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
 
-       regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA,
-                          ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask));
-       regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB,
-                          ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask));
-
        esai_priv->slot_width = slot_width;
        esai_priv->slots = slots;
+       esai_priv->tx_mask = tx_mask;
+       esai_priv->rx_mask = rx_mask;
 
        return 0;
 }
@@ -596,6 +590,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
        bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
        u8 i, channels = substream->runtime->channels;
        u32 pins = DIV_ROUND_UP(channels, esai_priv->slots);
+       u32 mask;
 
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
@@ -608,15 +603,38 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
                for (i = 0; tx && i < channels; i++)
                        regmap_write(esai_priv->regmap, REG_ESAI_ETDR, 0x0);
 
+               /*
+                * If TE/RE is set at the end of the enablement flow,
+                * a channel swap issue shows up in the multiple data
+                * line case. To work around it, switch the bit
+                * enablement sequence to the one below:
+                * 1) clear xSMB & xSMA: done in probe and in the
+                *                       stop state.
+                * 2) set TE/RE
+                * 3) set xSMB
+                * 4) set xSMA: xSMA is the last one in this flow;
+                *              setting it triggers the ESAI to start.
+                */
                regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
                                   tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK,
                                   tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins));
+               mask = tx ? esai_priv->tx_mask : esai_priv->rx_mask;
+
+               regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
+                                  ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(mask));
+               regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
+                                  ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(mask));
+
                break;
        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
                                   tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, 0);
+               regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
+                                  ESAI_xSMA_xS_MASK, 0);
+               regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
+                                  ESAI_xSMB_xS_MASK, 0);
 
                /* Disable and reset FIFO */
                regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx),
@@ -906,6 +924,15 @@ static int fsl_esai_probe(struct platform_device *pdev)
                return ret;
        }
 
+       esai_priv->tx_mask = 0xFFFFFFFF;
+       esai_priv->rx_mask = 0xFFFFFFFF;
+
+       /* Clear the TSMA, TSMB, RSMA, RSMB */
+       regmap_write(esai_priv->regmap, REG_ESAI_TSMA, 0);
+       regmap_write(esai_priv->regmap, REG_ESAI_TSMB, 0);
+       regmap_write(esai_priv->regmap, REG_ESAI_RSMA, 0);
+       regmap_write(esai_priv->regmap, REG_ESAI_RSMB, 0);
+
        ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
                                              &fsl_esai_dai, 1);
        if (ret) {
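
Condensed, the start ordering that the new trigger comment describes is: xSMA/xSMB already clear, then TE/RE, then xSMB, then xSMA last. An illustrative helper (not in the patch) capturing just that sequence:

static void esai_start(struct fsl_esai *esai, bool tx, u32 pins, u32 mask)
{
        /* 1) xSMA/xSMB were cleared in probe or on the previous stop */

        /* 2) enable the transmitters/receivers */
        regmap_update_bits(esai->regmap, REG_ESAI_xCR(tx),
                           tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK,
                           tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins));

        /* 3) high half of the slot mask */
        regmap_update_bits(esai->regmap, REG_ESAI_xSMB(tx),
                           ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(mask));

        /* 4) xSMA last: writing it starts the ESAI */
        regmap_update_bits(esai->regmap, REG_ESAI_xSMA(tx),
                           ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(mask));
}
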
index bb12351330e8c0f307b0237ee2e2f1b6b2f9aeec..69bc4848d7876cec544d4ab5067230092953b806 100644 (file)
@@ -20,6 +20,8 @@
 #include <linux/string.h>
 #include <sound/simple_card_utils.h>
 
+#define DPCM_SELECTABLE 1
+
 struct graph_priv {
        struct snd_soc_card snd_card;
        struct graph_dai_props {
@@ -440,6 +442,7 @@ static int graph_for_each_link(struct graph_priv *priv,
        struct device_node *codec_port;
        struct device_node *codec_port_old = NULL;
        struct asoc_simple_card_data adata;
+       uintptr_t dpcm_selectable = (uintptr_t)of_device_get_match_data(dev);
        int rc, ret;
 
        /* loop for all listed CPU port */
@@ -470,8 +473,9 @@ static int graph_for_each_link(struct graph_priv *priv,
                         * if Codec port has many endpoints,
                         * or has convert-xxx property
                         */
-                       if ((of_get_child_count(codec_port) > 1) ||
-                           adata.convert_rate || adata.convert_channels)
+                       if (dpcm_selectable &&
+                           ((of_get_child_count(codec_port) > 1) ||
+                            adata.convert_rate || adata.convert_channels))
                                ret = func_dpcm(priv, cpu_ep, codec_ep, li,
                                                (codec_port_old == codec_port));
                        /* else normal sound */
@@ -732,7 +736,8 @@ static int graph_remove(struct platform_device *pdev)
 
 static const struct of_device_id graph_of_match[] = {
        { .compatible = "audio-graph-card", },
-       { .compatible = "audio-graph-scu-card", },
+       { .compatible = "audio-graph-scu-card",
+         .data = (void *)DPCM_SELECTABLE },
        {},
 };
 MODULE_DEVICE_TABLE(of, graph_of_match);
index 7147bba45a2a61b0830ed49e6057edb3935c9c03..34de32efc4c4defd14c823b904931f50886b9c69 100644 (file)
@@ -9,12 +9,15 @@
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/string.h>
 #include <sound/simple_card.h>
 #include <sound/soc-dai.h>
 #include <sound/soc.h>
 
+#define DPCM_SELECTABLE 1
+
 struct simple_priv {
        struct snd_soc_card snd_card;
        struct simple_dai_props {
@@ -441,6 +444,7 @@ static int simple_for_each_link(struct simple_priv *priv,
        struct device *dev = simple_priv_to_dev(priv);
        struct device_node *top = dev->of_node;
        struct device_node *node;
+       uintptr_t dpcm_selectable = (uintptr_t)of_device_get_match_data(dev);
        bool is_top = 0;
        int ret = 0;
 
@@ -480,8 +484,9 @@ static int simple_for_each_link(struct simple_priv *priv,
                         * if it has many CPUs,
                         * or has convert-xxx property
                         */
-                       if (num > 2 ||
-                           adata.convert_rate || adata.convert_channels)
+                       if (dpcm_selectable &&
+                           (num > 2 ||
+                            adata.convert_rate || adata.convert_channels))
                                ret = func_dpcm(priv, np, codec, li, is_top);
                        /* else normal sound */
                        else
@@ -822,7 +827,8 @@ static int simple_remove(struct platform_device *pdev)
 
 static const struct of_device_id simple_of_match[] = {
        { .compatible = "simple-audio-card", },
-       { .compatible = "simple-scu-audio-card", },
+       { .compatible = "simple-scu-audio-card",
+         .data = (void *)DPCM_SELECTABLE },
        {},
 };
 MODULE_DEVICE_TABLE(of, simple_of_match);
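
Both card drivers now use the same match-data idiom: the DPCM-capable compatible carries a flag in .data, and the probe path reads it back with of_device_get_match_data() instead of string-matching the compatible again. A sketch with hypothetical names:

#define MY_DPCM_FLAG 1

static const struct of_device_id my_of_match[] = {
        { .compatible = "vendor,plain-card" },
        { .compatible = "vendor,dpcm-card", .data = (void *)MY_DPCM_FLAG },
        { /* sentinel */ },
};

static bool my_wants_dpcm(struct device *dev)
{
        /* NULL (plain-card) casts to 0, so this is false for it */
        return (uintptr_t)of_device_get_match_data(dev) == MY_DPCM_FLAG;
}
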
index 08cea5b5cda9fa9f6f617213c1fba5beef4bd489..0e8b1c5eec888b4c988206a04a3a8e1ca65e14aa 100644 (file)
@@ -706,9 +706,17 @@ static int sst_soc_probe(struct snd_soc_component *component)
        return sst_dsp_init_v2_dpcm(component);
 }
 
+static void sst_soc_remove(struct snd_soc_component *component)
+{
+       struct sst_data *drv = dev_get_drvdata(component->dev);
+
+       drv->soc_card = NULL;
+}
+
 static const struct snd_soc_component_driver sst_soc_platform_drv  = {
        .name           = DRV_NAME,
        .probe          = sst_soc_probe,
+       .remove         = sst_soc_remove,
        .ops            = &sst_platform_ops,
        .compr_ops      = &sst_platform_compr_ops,
        .pcm_new        = sst_pcm_new,
index 3263b0495853c2d57e22cb1a90d6cc7515deb952..c0e0844f75b9fe891fc7352c25e957c800f5e4b6 100644 (file)
@@ -43,6 +43,7 @@ struct cht_mc_private {
        struct clk *mclk;
        struct snd_soc_jack jack;
        bool ts3a227e_present;
+       int quirks;
 };
 
 static int platform_clock_control(struct snd_soc_dapm_widget *w,
@@ -54,6 +55,10 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
        struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
        int ret;
 
+       /* See the comment in snd_cht_mc_probe() */
+       if (ctx->quirks & QUIRK_PMC_PLT_CLK_0)
+               return 0;
+
        codec_dai = snd_soc_card_get_codec_dai(card, CHT_CODEC_DAI);
        if (!codec_dai) {
                dev_err(card->dev, "Codec dai not found; Unable to set platform clock\n");
@@ -223,6 +228,10 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
                        "jack detection gpios not added, error %d\n", ret);
        }
 
+       /* See the comment in snd_cht_mc_probe() */
+       if (ctx->quirks & QUIRK_PMC_PLT_CLK_0)
+               return 0;
+
        /*
         * The firmware might enable the clock at
         * boot (this information may or may not
@@ -423,16 +432,15 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
        const char *mclk_name;
        struct snd_soc_acpi_mach *mach;
        const char *platform_name;
-       int quirks = 0;
-
-       dmi_id = dmi_first_match(cht_max98090_quirk_table);
-       if (dmi_id)
-               quirks = (unsigned long)dmi_id->driver_data;
 
        drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
        if (!drv)
                return -ENOMEM;
 
+       dmi_id = dmi_first_match(cht_max98090_quirk_table);
+       if (dmi_id)
+               drv->quirks = (unsigned long)dmi_id->driver_data;
+
        drv->ts3a227e_present = acpi_dev_found("104C227E");
        if (!drv->ts3a227e_present) {
                /* no need probe TI jack detection chip */
@@ -458,7 +466,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
        snd_soc_card_cht.dev = &pdev->dev;
        snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
 
-       if (quirks & QUIRK_PMC_PLT_CLK_0)
+       if (drv->quirks & QUIRK_PMC_PLT_CLK_0)
                mclk_name = "pmc_plt_clk_0";
        else
                mclk_name = "pmc_plt_clk_3";
@@ -471,6 +479,21 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
                return PTR_ERR(drv->mclk);
        }
 
+       /*
+        * Boards which have the MAX98090's clk connected to clk_0 do not seem
+        * to like it if we muck with the clock. If we disable the clock when
+        * it is unused we get "max98090 i2c-193C9890:00: PLL unlocked" errors
+        * and the PLL never seems to lock again.
+        * So for these boards we enable it here once and leave it at that.
+        */
+       if (drv->quirks & QUIRK_PMC_PLT_CLK_0) {
+               ret_val = clk_prepare_enable(drv->mclk);
+               if (ret_val < 0) {
+                       dev_err(&pdev->dev, "MCLK enable error: %d\n", ret_val);
+                       return ret_val;
+               }
+       }
+
        ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_cht);
        if (ret_val) {
                dev_err(&pdev->dev,
@@ -481,11 +504,23 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
        return ret_val;
 }
 
+static int snd_cht_mc_remove(struct platform_device *pdev)
+{
+       struct snd_soc_card *card = platform_get_drvdata(pdev);
+       struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
+
+       if (ctx->quirks & QUIRK_PMC_PLT_CLK_0)
+               clk_disable_unprepare(ctx->mclk);
+
+       return 0;
+}
+
 static struct platform_driver snd_cht_mc_driver = {
        .driver = {
                .name = "cht-bsw-max98090",
        },
        .probe = snd_cht_mc_probe,
+       .remove = snd_cht_mc_remove,
 };
 
 module_platform_driver(snd_cht_mc_driver)
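
The quirk above boils down to: enable the codec's reference clock once in probe and drop it only in remove, never from DAPM. A trimmed sketch, with hypothetical names and shortened error paths:

static int my_probe(struct platform_device *pdev)
{
        struct my_priv *priv;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_0");
        if (IS_ERR(priv->mclk))
                return PTR_ERR(priv->mclk);

        platform_set_drvdata(pdev, priv);

        /* keep the PLL reference running for the device's lifetime */
        return clk_prepare_enable(priv->mclk);
}

static int my_remove(struct platform_device *pdev)
{
        struct my_priv *priv = platform_get_drvdata(pdev);

        clk_disable_unprepare(priv->mclk);      /* balance probe */
        return 0;
}
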
index 7044d8c2b187375cd6fd44b3342eedf4b37fd8d1..879f14257a3ea4c8fae5eaa683c365e519ecfc17 100644 (file)
@@ -405,7 +405,7 @@ static const struct snd_pcm_hw_constraint_list constraints_dmic_channels = {
 };
 
 static const unsigned int dmic_2ch[] = {
-       4,
+       2,
 };
 
 static const struct snd_pcm_hw_constraint_list constraints_dmic_2ch = {
index 28c4806b196a2fc3505ce88e298b834cf64e1c56..4bf70b4429f03075b07d877c67f6003d15d5336d 100644 (file)
@@ -483,6 +483,7 @@ static void skl_set_base_module_format(struct skl_sst *ctx,
        base_cfg->audio_fmt.bit_depth = format->bit_depth;
        base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth;
        base_cfg->audio_fmt.ch_cfg = format->ch_cfg;
+       base_cfg->audio_fmt.sample_type = format->sample_type;
 
        dev_dbg(ctx->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n",
                        format->bit_depth, format->valid_bit_depth,
index 1ae83f4ccc3615bfa42e28d08d43f27870526ded..9735e24122514f81d9ee88002fdbdd32ade65e4e 100644 (file)
@@ -181,6 +181,7 @@ int skl_pcm_link_dma_prepare(struct device *dev, struct skl_pipe_params *params)
        struct hdac_stream *hstream;
        struct hdac_ext_stream *stream;
        struct hdac_ext_link *link;
+       unsigned char stream_tag;
 
        hstream = snd_hdac_get_stream(bus, params->stream,
                                        params->link_dma_id + 1);
@@ -199,10 +200,13 @@ int skl_pcm_link_dma_prepare(struct device *dev, struct skl_pipe_params *params)
 
        snd_hdac_ext_link_stream_setup(stream, format_val);
 
-       list_for_each_entry(link, &bus->hlink_list, list) {
-               if (link->index == params->link_index)
-                       snd_hdac_ext_link_set_stream_id(link,
-                                       hstream->stream_tag);
+       stream_tag = hstream->stream_tag;
+       if (stream->hstream.direction == SNDRV_PCM_STREAM_PLAYBACK) {
+               list_for_each_entry(link, &bus->hlink_list, list) {
+                       if (link->index == params->link_index)
+                               snd_hdac_ext_link_set_stream_id(link,
+                                                               stream_tag);
+               }
        }
 
        stream->link_prepared = 1;
@@ -645,6 +649,7 @@ static int skl_link_hw_free(struct snd_pcm_substream *substream,
        struct hdac_ext_stream *link_dev =
                                snd_soc_dai_get_dma_data(dai, substream);
        struct hdac_ext_link *link;
+       unsigned char stream_tag;
 
        dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
 
@@ -654,7 +659,11 @@ static int skl_link_hw_free(struct snd_pcm_substream *substream,
        if (!link)
                return -EINVAL;
 
-       snd_hdac_ext_link_clear_stream_id(link, hdac_stream(link_dev)->stream_tag);
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+               stream_tag = hdac_stream(link_dev)->stream_tag;
+               snd_hdac_ext_link_clear_stream_id(link, stream_tag);
+       }
+
        snd_hdac_ext_stream_release(link_dev, HDAC_EXT_STREAM_TYPE_LINK);
        return 0;
 }
@@ -1453,13 +1462,20 @@ static int skl_platform_soc_probe(struct snd_soc_component *component)
        return 0;
 }
 
+static void skl_pcm_remove(struct snd_soc_component *component)
+{
+       /* remove topology */
+       snd_soc_tplg_component_remove(component, SND_SOC_TPLG_INDEX_ALL);
+}
+
 static const struct snd_soc_component_driver skl_component  = {
        .name           = "pcm",
        .probe          = skl_platform_soc_probe,
+       .remove         = skl_pcm_remove,
        .ops            = &skl_platform_ops,
        .pcm_new        = skl_pcm_new,
        .pcm_free       = skl_pcm_free,
-       .ignore_module_refcount = 1, /* do not increase the refcount in core */
+       .module_get_upon_open = 1, /* increment refcount when a pcm is opened */
 };
 
 int skl_platform_register(struct device *dev)
index 1b8bcdaf02d116cbc124aac6597f3a8a501b468d..9a163d7064d174ff1e4142cb09b31adeaa9c6f59 100644 (file)
@@ -49,6 +49,7 @@ enum bt_sco_state {
        BT_SCO_STATE_IDLE,
        BT_SCO_STATE_RUNNING,
        BT_SCO_STATE_ENDING,
+       BT_SCO_STATE_LOOPBACK,
 };
 
 enum bt_sco_direct {
@@ -486,7 +487,8 @@ static irqreturn_t mtk_btcvsd_snd_irq_handler(int irq_id, void *dev)
        if (bt->rx->state != BT_SCO_STATE_RUNNING &&
            bt->rx->state != BT_SCO_STATE_ENDING &&
            bt->tx->state != BT_SCO_STATE_RUNNING &&
-           bt->tx->state != BT_SCO_STATE_ENDING) {
+           bt->tx->state != BT_SCO_STATE_ENDING &&
+           bt->tx->state != BT_SCO_STATE_LOOPBACK) {
                dev_warn(bt->dev, "%s(), in idle state: rx->state: %d, tx->state: %d\n",
                         __func__, bt->rx->state, bt->tx->state);
                goto irq_handler_exit;
@@ -512,6 +514,42 @@ static irqreturn_t mtk_btcvsd_snd_irq_handler(int irq_id, void *dev)
        buf_cnt_tx = btsco_packet_info[packet_type][2];
        buf_cnt_rx = btsco_packet_info[packet_type][3];
 
+       if (bt->tx->state == BT_SCO_STATE_LOOPBACK) {
+               u8 *src, *dst;
+               unsigned long connsys_addr_rx, ap_addr_rx;
+               unsigned long connsys_addr_tx, ap_addr_tx;
+
+               connsys_addr_rx = *bt->bt_reg_pkt_r;
+               ap_addr_rx = (unsigned long)bt->bt_sram_bank2_base +
+                            (connsys_addr_rx & 0xFFFF);
+
+               connsys_addr_tx = *bt->bt_reg_pkt_w;
+               ap_addr_tx = (unsigned long)bt->bt_sram_bank2_base +
+                            (connsys_addr_tx & 0xFFFF);
+
+               if (connsys_addr_tx == 0xdeadfeed ||
+                   connsys_addr_rx == 0xdeadfeed) {
+                       /* BT returns 0xdeadfeed if a reg is read during BT sleep */
+                       dev_warn(bt->dev, "%s(), connsys_addr_tx == 0xdeadfeed\n",
+                                __func__);
+                       goto irq_handler_exit;
+               }
+
+               src = (u8 *)ap_addr_rx;
+               dst = (u8 *)ap_addr_tx;
+
+               mtk_btcvsd_snd_data_transfer(BT_SCO_DIRECT_BT2ARM, src,
+                                            bt->tx->temp_packet_buf,
+                                            packet_length,
+                                            packet_num);
+               mtk_btcvsd_snd_data_transfer(BT_SCO_DIRECT_ARM2BT,
+                                            bt->tx->temp_packet_buf, dst,
+                                            packet_length,
+                                            packet_num);
+               bt->rx->rw_cnt++;
+               bt->tx->rw_cnt++;
+       }
+
        if (bt->rx->state == BT_SCO_STATE_RUNNING ||
            bt->rx->state == BT_SCO_STATE_ENDING) {
                if (bt->rx->xrun) {
@@ -1067,6 +1105,33 @@ static int btcvsd_band_set(struct snd_kcontrol *kcontrol,
        return 0;
 }
 
+static int btcvsd_loopback_get(struct snd_kcontrol *kcontrol,
+                              struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+       bool lpbk_en = bt->tx->state == BT_SCO_STATE_LOOPBACK;
+
+       ucontrol->value.integer.value[0] = lpbk_en;
+       return 0;
+}
+
+static int btcvsd_loopback_set(struct snd_kcontrol *kcontrol,
+                              struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
+
+       if (ucontrol->value.integer.value[0]) {
+               mtk_btcvsd_snd_set_state(bt, bt->tx, BT_SCO_STATE_LOOPBACK);
+               mtk_btcvsd_snd_set_state(bt, bt->rx, BT_SCO_STATE_LOOPBACK);
+       } else {
+               mtk_btcvsd_snd_set_state(bt, bt->tx, BT_SCO_STATE_RUNNING);
+               mtk_btcvsd_snd_set_state(bt, bt->rx, BT_SCO_STATE_RUNNING);
+       }
+       return 0;
+}
+
 static int btcvsd_tx_mute_get(struct snd_kcontrol *kcontrol,
                              struct snd_ctl_elem_value *ucontrol)
 {
@@ -1202,6 +1267,8 @@ static int btcvsd_tx_timestamp_get(struct snd_kcontrol *kcontrol,
 static const struct snd_kcontrol_new mtk_btcvsd_snd_controls[] = {
        SOC_ENUM_EXT("BTCVSD Band", btcvsd_enum[0],
                     btcvsd_band_get, btcvsd_band_set),
+       SOC_SINGLE_BOOL_EXT("BTCVSD Loopback Switch", 0,
+                           btcvsd_loopback_get, btcvsd_loopback_set),
        SOC_SINGLE_BOOL_EXT("BTCVSD Tx Mute Switch", 0,
                            btcvsd_tx_mute_get, btcvsd_tx_mute_set),
        SOC_SINGLE_BOOL_EXT("BTCVSD Tx Irq Received Switch", 0,
index f523ad103acc4cc9ce2bd745a24aa6f5988cc7bf..48e81c5d52fc27959d8b8215a1903764bf302fdd 100644 (file)
@@ -605,6 +605,10 @@ void mt8183_mck_disable(struct mtk_base_afe *afe, int mck_id)
        int m_sel_id = mck_div[mck_id].m_sel_id;
        int div_clk_id = mck_div[mck_id].div_clk_id;
 
+       /* i2s5 mck is not supported */
+       if (mck_id == MT8183_I2S5_MCK)
+               return;
+
        clk_disable_unprepare(afe_priv->clk[div_clk_id]);
        if (m_sel_id >= 0)
                clk_disable_unprepare(afe_priv->clk[m_sel_id]);
index 400e29edb1c9c4db4d3afbbad2b6a41952100a22..d0b403a0e27b830bc480935fa75df123c3375302 100644 (file)
@@ -24,7 +24,7 @@
 
 #include "rockchip_pdm.h"
 
-#define PDM_DMA_BURST_SIZE     (16) /* size * width: 16*4 = 64 bytes */
+#define PDM_DMA_BURST_SIZE     (8) /* size * width: 8*4 = 32 bytes */
 
 struct rk_pdm_dev {
        struct device *dev;
@@ -208,7 +208,9 @@ static int rockchip_pdm_set_fmt(struct snd_soc_dai *cpu_dai,
                return -EINVAL;
        }
 
+       pm_runtime_get_sync(cpu_dai->dev);
        regmap_update_bits(pdm->regmap, PDM_CLK_CTRL, mask, val);
+       pm_runtime_put(cpu_dai->dev);
 
        return 0;
 }
index 4231001226f494da587a7096d27c001802fcdf9c..ab471d550d17adf682d0c5c26607e6de85885791 100644 (file)
@@ -1130,11 +1130,11 @@ static const struct snd_soc_dapm_widget samsung_i2s_widgets[] = {
 };
 
 static const struct snd_soc_dapm_route samsung_i2s_dapm_routes[] = {
-       { "Playback Mixer", NULL, "Primary" },
-       { "Playback Mixer", NULL, "Secondary" },
+       { "Playback Mixer", NULL, "Primary Playback" },
+       { "Playback Mixer", NULL, "Secondary Playback" },
 
        { "Mixer DAI TX", NULL, "Playback Mixer" },
-       { "Playback Mixer", NULL, "Mixer DAI RX" },
+       { "Primary Capture", NULL, "Mixer DAI RX" },
 };
 
 static const struct snd_soc_component_driver samsung_i2s_component = {
@@ -1155,7 +1155,8 @@ static int i2s_alloc_dais(struct samsung_i2s_priv *priv,
                          int num_dais)
 {
        static const char *dai_names[] = { "samsung-i2s", "samsung-i2s-sec" };
-       static const char *stream_names[] = { "Primary", "Secondary" };
+       static const char *stream_names[] = { "Primary Playback",
+                                             "Secondary Playback" };
        struct snd_soc_dai_driver *dai_drv;
        struct i2s_dai *dai;
        int i;
@@ -1201,6 +1202,7 @@ static int i2s_alloc_dais(struct samsung_i2s_priv *priv,
        dai_drv->capture.channels_max = 2;
        dai_drv->capture.rates = i2s_dai_data->pcm_rates;
        dai_drv->capture.formats = SAMSUNG_I2S_FMTS;
+       dai_drv->capture.stream_name = "Primary Capture";
 
        return 0;
 }
index 694512f980fdc207577fcd2a5a59d7a6484bfdd0..1dc54c4206f0adc1ed5250c2d9bf3f44f5c7adf2 100644 (file)
@@ -91,11 +91,11 @@ static int odroid_card_be_hw_params(struct snd_pcm_substream *substream,
                return ret;
 
        /*
-        *  We add 1 to the rclk_freq value in order to avoid too low clock
+        *  We add 2 to the rclk_freq value in order to avoid too low clock
         *  frequency values due to the EPLL output frequency not being exact
         *  multiple of the audio sampling rate.
         */
-       rclk_freq = params_rate(params) * rfs + 1;
+       rclk_freq = params_rate(params) * rfs + 2;
 
        ret = clk_set_rate(priv->sclk_i2s, rclk_freq);
        if (ret < 0)
index 022996d2db1301d16619ef8ead029a19fce1b75b..4fe83e611c01e0d983e5af5c79594d4c858ec610 100644 (file)
@@ -110,6 +110,8 @@ static const struct of_device_id rsnd_of_match[] = {
        { .compatible = "renesas,rcar_sound-gen1", .data = (void *)RSND_GEN1 },
        { .compatible = "renesas,rcar_sound-gen2", .data = (void *)RSND_GEN2 },
        { .compatible = "renesas,rcar_sound-gen3", .data = (void *)RSND_GEN3 },
+       /* Special Handling */
+       { .compatible = "renesas,rcar_sound-r8a77990", .data = (void *)(RSND_GEN3 | RSND_SOC_E) },
        {},
 };
 MODULE_DEVICE_TABLE(of, rsnd_of_match);
index 90625c57847b51281c5b38f81f8ab062a70a3785..0e6ef4e1840021d00c94089ca8522c956d4bb813 100644 (file)
@@ -607,6 +607,8 @@ struct rsnd_priv {
 #define RSND_GEN1      (1 << 0)
 #define RSND_GEN2      (2 << 0)
 #define RSND_GEN3      (3 << 0)
+#define RSND_SOC_MASK  (0xFF << 4)
+#define RSND_SOC_E     (1 << 4) /* E1/E2/E3 */
 
        /*
         * below value will be filled on rsnd_gen_probe()
@@ -679,6 +681,9 @@ struct rsnd_priv {
 #define rsnd_is_gen1(priv)     (((priv)->flags & RSND_GEN_MASK) == RSND_GEN1)
 #define rsnd_is_gen2(priv)     (((priv)->flags & RSND_GEN_MASK) == RSND_GEN2)
 #define rsnd_is_gen3(priv)     (((priv)->flags & RSND_GEN_MASK) == RSND_GEN3)
+#define rsnd_is_e3(priv)       (((priv)->flags & \
+                                       (RSND_GEN_MASK | RSND_SOC_MASK)) == \
+                                       (RSND_GEN3 | RSND_SOC_E))
 
 #define rsnd_flags_has(p, f) ((p)->flags & (f))
 #define rsnd_flags_set(p, f) ((p)->flags |= (f))
index db81e066b92ef98902d4433e939b25ab0a635625..585ffba0244b9f568685ed52323d31f7f9fcb3b4 100644 (file)
@@ -14,7 +14,6 @@
  */
 
 #include "rsnd.h"
-#include <linux/sys_soc.h>
 
 #define SRC_NAME "src"
 
@@ -135,7 +134,7 @@ unsigned int rsnd_src_get_rate(struct rsnd_priv *priv,
        return rate;
 }
 
-const static u32 bsdsr_table_pattern1[] = {
+static const u32 bsdsr_table_pattern1[] = {
        0x01800000, /* 6 - 1/6 */
        0x01000000, /* 6 - 1/4 */
        0x00c00000, /* 6 - 1/3 */
@@ -144,7 +143,7 @@ const static u32 bsdsr_table_pattern1[] = {
        0x00400000, /* 6 - 1   */
 };
 
-const static u32 bsdsr_table_pattern2[] = {
+static const u32 bsdsr_table_pattern2[] = {
        0x02400000, /* 6 - 1/6 */
        0x01800000, /* 6 - 1/4 */
        0x01200000, /* 6 - 1/3 */
@@ -153,7 +152,7 @@ const static u32 bsdsr_table_pattern2[] = {
        0x00600000, /* 6 - 1   */
 };
 
-const static u32 bsisr_table[] = {
+static const u32 bsisr_table[] = {
        0x00100060, /* 6 - 1/6 */
        0x00100040, /* 6 - 1/4 */
        0x00100030, /* 6 - 1/3 */
@@ -162,7 +161,7 @@ const static u32 bsisr_table[] = {
        0x00100020, /* 6 - 1   */
 };
 
-const static u32 chan288888[] = {
+static const u32 chan288888[] = {
        0x00000006, /* 1 to 2 */
        0x000001fe, /* 1 to 8 */
        0x000001fe, /* 1 to 8 */
@@ -171,7 +170,7 @@ const static u32 chan288888[] = {
        0x000001fe, /* 1 to 8 */
 };
 
-const static u32 chan244888[] = {
+static const u32 chan244888[] = {
        0x00000006, /* 1 to 2 */
        0x0000001e, /* 1 to 4 */
        0x0000001e, /* 1 to 4 */
@@ -180,7 +179,7 @@ const static u32 chan244888[] = {
        0x000001fe, /* 1 to 8 */
 };
 
-const static u32 chan222222[] = {
+static const u32 chan222222[] = {
        0x00000006, /* 1 to 2 */
        0x00000006, /* 1 to 2 */
        0x00000006, /* 1 to 2 */
@@ -189,18 +188,12 @@ const static u32 chan222222[] = {
        0x00000006, /* 1 to 2 */
 };
 
-static const struct soc_device_attribute ov_soc[] = {
-       { .soc_id = "r8a77990" }, /* E3 */
-       { /* sentinel */ }
-};
-
 static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
                                      struct rsnd_mod *mod)
 {
        struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
        struct device *dev = rsnd_priv_to_dev(priv);
        struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
-       const struct soc_device_attribute *soc = soc_device_match(ov_soc);
        int is_play = rsnd_io_is_play(io);
        int use_src = 0;
        u32 fin, fout;
@@ -307,7 +300,7 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
        /*
         * E3 need to overwrite
         */
-       if (soc)
+       if (rsnd_is_e3(priv))
                switch (rsnd_mod_id(mod)) {
                case 0:
                case 4:
index 93d316d5bf8e3cac63d9955cfa9e0a6b7bbbb9d0..46e3ab0fced47342be93c32f6509a79829967281 100644 (file)
@@ -947,7 +947,7 @@ static void soc_cleanup_component(struct snd_soc_component *component)
        snd_soc_dapm_free(snd_soc_component_get_dapm(component));
        soc_cleanup_component_debugfs(component);
        component->card = NULL;
-       if (!component->driver->ignore_module_refcount)
+       if (!component->driver->module_get_upon_open)
                module_put(component->dev->driver->owner);
 }
 
@@ -1381,7 +1381,7 @@ static int soc_probe_component(struct snd_soc_card *card,
                return 0;
        }
 
-       if (!component->driver->ignore_module_refcount &&
+       if (!component->driver->module_get_upon_open &&
            !try_module_get(component->dev->driver->owner))
                return -ENODEV;
 
@@ -2797,6 +2797,7 @@ int snd_soc_register_card(struct snd_soc_card *card)
 
                ret = soc_init_dai_link(card, link);
                if (ret) {
+                       soc_cleanup_platform(card);
                        dev_err(card->dev, "ASoC: failed to init link %s\n",
                                link->name);
                        mutex_unlock(&client_mutex);
@@ -2819,6 +2820,7 @@ int snd_soc_register_card(struct snd_soc_card *card)
        card->instantiated = 0;
        mutex_init(&card->mutex);
        mutex_init(&card->dapm_mutex);
+       spin_lock_init(&card->dpcm_lock);
 
        return snd_soc_bind_card(card);
 }
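
The flag rename reflects a semantic flip: with module_get_upon_open, the component's module is pinned per PCM open rather than once at probe. The pairing, sketched with a hypothetical owner pointer:

static struct module *my_owner; /* component->dev->driver->owner */

static int my_open(struct snd_pcm_substream *substream)
{
        if (!try_module_get(my_owner))
                return -ENODEV;         /* driver is unloading */
        return 0;
}

static int my_close(struct snd_pcm_substream *substream)
{
        module_put(my_owner);           /* balances the open */
        return 0;
}
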
index 1ec06ef6d161606922b1a3f8d16e2e685129db62..0382a47b30bd8182d40340c839268233cd47e21b 100644 (file)
@@ -3650,6 +3650,13 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
        case snd_soc_dapm_dac:
        case snd_soc_dapm_aif_in:
        case snd_soc_dapm_pga:
+       case snd_soc_dapm_buffer:
+       case snd_soc_dapm_scheduler:
+       case snd_soc_dapm_effect:
+       case snd_soc_dapm_src:
+       case snd_soc_dapm_asrc:
+       case snd_soc_dapm_encoder:
+       case snd_soc_dapm_decoder:
        case snd_soc_dapm_out_drv:
        case snd_soc_dapm_micbias:
        case snd_soc_dapm_line:
@@ -3957,6 +3964,10 @@ snd_soc_dapm_free_kcontrol(struct snd_soc_card *card,
        int count;
 
        devm_kfree(card->dev, (void *)*private_value);
+
+       if (!w_param_text)
+               return;
+
        for (count = 0 ; count < num_params; count++)
                devm_kfree(card->dev, (void *)w_param_text[count]);
        devm_kfree(card->dev, w_param_text);
index 0d5ec68a1e50869e00ea6abb13b229c79b903329..be80a12fba27cc381b0438a95bb4987c6843cb39 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/pm_runtime.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 #include <linux/export.h>
@@ -463,6 +464,9 @@ static int soc_pcm_components_close(struct snd_pcm_substream *substream,
                        continue;
 
                component->driver->ops->close(substream);
+
+               if (component->driver->module_get_upon_open)
+                       module_put(component->dev->driver->owner);
        }
 
        return 0;
@@ -513,6 +517,12 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
                    !component->driver->ops->open)
                        continue;
 
+               if (component->driver->module_get_upon_open &&
+                   !try_module_get(component->dev->driver->owner)) {
+                       ret = -ENODEV;
+                       goto module_err;
+               }
+
                ret = component->driver->ops->open(substream);
                if (ret < 0) {
                        dev_err(component->dev,
@@ -628,7 +638,7 @@ codec_dai_err:
 
 component_err:
        soc_pcm_components_close(substream, component);
-
+module_err:
        if (cpu_dai->driver->ops->shutdown)
                cpu_dai->driver->ops->shutdown(substream, cpu_dai);
 out:
@@ -954,10 +964,13 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
                codec_params = *params;
 
                /* fixup params based on TDM slot masks */
-               if (codec_dai->tx_mask)
+               if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+                   codec_dai->tx_mask)
                        soc_pcm_codec_params_fixup(&codec_params,
                                                   codec_dai->tx_mask);
-               if (codec_dai->rx_mask)
+
+               if (substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
+                   codec_dai->rx_mask)
                        soc_pcm_codec_params_fixup(&codec_params,
                                                   codec_dai->rx_mask);
 
@@ -1213,6 +1226,7 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
                struct snd_soc_pcm_runtime *be, int stream)
 {
        struct snd_soc_dpcm *dpcm;
+       unsigned long flags;
 
        /* only add new dpcms */
        for_each_dpcm_be(fe, stream, dpcm) {
@@ -1228,8 +1242,10 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
        dpcm->fe = fe;
        be->dpcm[stream].runtime = fe->dpcm[stream].runtime;
        dpcm->state = SND_SOC_DPCM_LINK_STATE_NEW;
+       spin_lock_irqsave(&fe->card->dpcm_lock, flags);
        list_add(&dpcm->list_be, &fe->dpcm[stream].be_clients);
        list_add(&dpcm->list_fe, &be->dpcm[stream].fe_clients);
+       spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
 
        dev_dbg(fe->dev, "connected new DPCM %s path %s %s %s\n",
                        stream ? "capture" : "playback",  fe->dai_link->name,
@@ -1275,6 +1291,7 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
 void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
 {
        struct snd_soc_dpcm *dpcm, *d;
+       unsigned long flags;
 
        for_each_dpcm_be_safe(fe, stream, dpcm, d) {
                dev_dbg(fe->dev, "ASoC: BE %s disconnect check for %s\n",
@@ -1294,8 +1311,10 @@ void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
 #ifdef CONFIG_DEBUG_FS
                debugfs_remove(dpcm->debugfs_state);
 #endif
+               spin_lock_irqsave(&fe->card->dpcm_lock, flags);
                list_del(&dpcm->list_be);
                list_del(&dpcm->list_fe);
+               spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
                kfree(dpcm);
        }
 }
@@ -1547,10 +1566,13 @@ int dpcm_process_paths(struct snd_soc_pcm_runtime *fe,
 void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream)
 {
        struct snd_soc_dpcm *dpcm;
+       unsigned long flags;
 
+       spin_lock_irqsave(&fe->card->dpcm_lock, flags);
        for_each_dpcm_be(fe, stream, dpcm)
                dpcm->be->dpcm[stream].runtime_update =
                                                SND_SOC_DPCM_UPDATE_NO;
+       spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
 }
 
 static void dpcm_be_dai_startup_unwind(struct snd_soc_pcm_runtime *fe,
@@ -1899,10 +1921,15 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream,
                struct snd_soc_pcm_runtime *be = dpcm->be;
                struct snd_pcm_substream *be_substream =
                        snd_soc_dpcm_get_substream(be, stream);
-               struct snd_soc_pcm_runtime *rtd = be_substream->private_data;
+               struct snd_soc_pcm_runtime *rtd;
                struct snd_soc_dai *codec_dai;
                int i;
 
+               /* A backend may not have the requested substream */
+               if (!be_substream)
+                       continue;
+
+               rtd = be_substream->private_data;
                if (rtd->dai_link->be_hw_params_fixup)
                        continue;
 
@@ -2571,6 +2598,7 @@ static int dpcm_run_update_startup(struct snd_soc_pcm_runtime *fe, int stream)
        struct snd_soc_dpcm *dpcm;
        enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream];
        int ret;
+       unsigned long flags;
 
        dev_dbg(fe->dev, "ASoC: runtime %s open on FE %s\n",
                        stream ? "capture" : "playback", fe->dai_link->name);
@@ -2640,11 +2668,13 @@ close:
        dpcm_be_dai_shutdown(fe, stream);
 disconnect:
        /* disconnect any non started BEs */
+       spin_lock_irqsave(&fe->card->dpcm_lock, flags);
        for_each_dpcm_be(fe, stream, dpcm) {
                struct snd_soc_pcm_runtime *be = dpcm->be;
                if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
                                dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
        }
+       spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
 
        return ret;
 }
@@ -3221,7 +3251,10 @@ int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
 {
        struct snd_soc_dpcm *dpcm;
        int state;
+       int ret = 1;
+       unsigned long flags;
 
+       spin_lock_irqsave(&fe->card->dpcm_lock, flags);
        for_each_dpcm_fe(be, stream, dpcm) {
 
                if (dpcm->fe == fe)
@@ -3230,12 +3263,15 @@ int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
                state = dpcm->fe->dpcm[stream].state;
                if (state == SND_SOC_DPCM_STATE_START ||
                        state == SND_SOC_DPCM_STATE_PAUSED ||
-                       state == SND_SOC_DPCM_STATE_SUSPEND)
-                       return 0;
+                       state == SND_SOC_DPCM_STATE_SUSPEND) {
+                       ret = 0;
+                       break;
+               }
        }
+       spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
 
        /* it's safe to free/stop this BE DAI */
-       return 1;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_free_stop);
 
@@ -3248,7 +3284,10 @@ int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
 {
        struct snd_soc_dpcm *dpcm;
        int state;
+       int ret = 1;
+       unsigned long flags;
 
+       spin_lock_irqsave(&fe->card->dpcm_lock, flags);
        for_each_dpcm_fe(be, stream, dpcm) {
 
                if (dpcm->fe == fe)
@@ -3258,12 +3297,15 @@ int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
                if (state == SND_SOC_DPCM_STATE_START ||
                        state == SND_SOC_DPCM_STATE_PAUSED ||
                        state == SND_SOC_DPCM_STATE_SUSPEND ||
-                       state == SND_SOC_DPCM_STATE_PREPARE)
-                       return 0;
+                       state == SND_SOC_DPCM_STATE_PREPARE) {
+                       ret = 0;
+                       break;
+               }
        }
+       spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
 
        /* it's safe to change hw_params */
-       return 1;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_params);
 
@@ -3302,6 +3344,7 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
        struct snd_pcm_hw_params *params = &fe->dpcm[stream].hw_params;
        struct snd_soc_dpcm *dpcm;
        ssize_t offset = 0;
+       unsigned long flags;
 
        /* FE state */
        offset += snprintf(buf + offset, size - offset,
@@ -3329,6 +3372,7 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
                goto out;
        }
 
+       spin_lock_irqsave(&fe->card->dpcm_lock, flags);
        for_each_dpcm_be(fe, stream, dpcm) {
                struct snd_soc_pcm_runtime *be = dpcm->be;
                params = &dpcm->hw_params;
@@ -3349,7 +3393,7 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
                                params_channels(params),
                                params_rate(params));
        }
-
+       spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
 out:
        return offset;
 }
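
Every FE/BE list walk in soc-pcm.c is now bracketed by card->dpcm_lock with interrupts disabled, so dpcm_be_connect()/dpcm_be_disconnect() cannot race the readers. The reader side, sketched (my_walk_backends is illustrative):

static void my_walk_backends(struct snd_soc_pcm_runtime *fe, int stream)
{
        struct snd_soc_dpcm *dpcm;
        unsigned long flags;

        spin_lock_irqsave(&fe->card->dpcm_lock, flags);
        for_each_dpcm_be(fe, stream, dpcm) {
                /* read or flag dpcm state only; no sleeping calls here */
        }
        spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
}
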
index 25fca7055464a894e5e260bf872de8c410136f56..96852d25061936e1f72b9c929686a1a740bdc6a8 100644 (file)
@@ -482,10 +482,11 @@ static void remove_widget(struct snd_soc_component *comp,
 
                        snd_ctl_remove(card, kcontrol);
 
-                       kfree(dobj->control.dvalues);
+                       /* free enum kcontrol's dvalues and dtexts */
+                       kfree(se->dobj.control.dvalues);
                        for (j = 0; j < se->items; j++)
-                               kfree(dobj->control.dtexts[j]);
-                       kfree(dobj->control.dtexts);
+                               kfree(se->dobj.control.dtexts[j]);
+                       kfree(se->dobj.control.dtexts);
 
                        kfree(se);
                        kfree(w->kcontrol_news[i].name);
index 47901983a6ff88706dd847376a42d5aeb2c3a4c6..78bed97347136974d3da6b8a09d3465eaafffbdd 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/clk.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
@@ -37,6 +38,8 @@ struct stm32_adfsdm_priv {
        /* PCM buffer */
        unsigned char *pcm_buff;
        unsigned int pos;
+
+       struct mutex lock; /* protect against race condition on iio state */
 };
 
 static const struct snd_pcm_hardware stm32_adfsdm_pcm_hw = {
@@ -62,10 +65,12 @@ static void stm32_adfsdm_shutdown(struct snd_pcm_substream *substream,
 {
        struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai);
 
+       mutex_lock(&priv->lock);
        if (priv->iio_active) {
                iio_channel_stop_all_cb(priv->iio_cb);
                priv->iio_active = false;
        }
+       mutex_unlock(&priv->lock);
 }
 
 static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream,
@@ -74,13 +79,19 @@ static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream,
        struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai);
        int ret;
 
+       mutex_lock(&priv->lock);
+       if (priv->iio_active) {
+               iio_channel_stop_all_cb(priv->iio_cb);
+               priv->iio_active = false;
+       }
+
        ret = iio_write_channel_attribute(priv->iio_ch,
                                          substream->runtime->rate, 0,
                                          IIO_CHAN_INFO_SAMP_FREQ);
        if (ret < 0) {
                dev_err(dai->dev, "%s: Failed to set %d sampling rate\n",
                        __func__, substream->runtime->rate);
-               return ret;
+               goto out;
        }
 
        if (!priv->iio_active) {
@@ -92,6 +103,9 @@ static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream,
                                __func__, ret);
        }
 
+out:
+       mutex_unlock(&priv->lock);
+
        return ret;
 }
 
@@ -291,6 +305,7 @@ MODULE_DEVICE_TABLE(of, stm32_adfsdm_of_match);
 static int stm32_adfsdm_probe(struct platform_device *pdev)
 {
        struct stm32_adfsdm_priv *priv;
+       struct snd_soc_component *component;
        int ret;
 
        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -299,6 +314,7 @@ static int stm32_adfsdm_probe(struct platform_device *pdev)
 
        priv->dev = &pdev->dev;
        priv->dai_drv = stm32_adfsdm_dai;
+       mutex_init(&priv->lock);
 
        dev_set_drvdata(&pdev->dev, priv);
 
@@ -317,9 +333,15 @@ static int stm32_adfsdm_probe(struct platform_device *pdev)
        if (IS_ERR(priv->iio_cb))
                return PTR_ERR(priv->iio_cb);
 
-       ret = devm_snd_soc_register_component(&pdev->dev,
-                                             &stm32_adfsdm_soc_platform,
-                                             NULL, 0);
+       component = devm_kzalloc(&pdev->dev, sizeof(*component), GFP_KERNEL);
+       if (!component)
+               return -ENOMEM;
+#ifdef CONFIG_DEBUG_FS
+       component->debugfs_prefix = "pcm";
+#endif
+
+       ret = snd_soc_add_component(&pdev->dev, component,
+                                   &stm32_adfsdm_soc_platform, NULL, 0);
        if (ret < 0)
                dev_err(&pdev->dev, "%s: Failed to register PCM platform\n",
                        __func__);
@@ -327,12 +349,20 @@ static int stm32_adfsdm_probe(struct platform_device *pdev)
        return ret;
 }
 
+static int stm32_adfsdm_remove(struct platform_device *pdev)
+{
+       snd_soc_unregister_component(&pdev->dev);
+
+       return 0;
+}
+
 static struct platform_driver stm32_adfsdm_driver = {
        .driver = {
                   .name = STM32_ADFSDM_DRV_NAME,
                   .of_match_table = stm32_adfsdm_of_match,
                   },
        .probe = stm32_adfsdm_probe,
+       .remove = stm32_adfsdm_remove,
 };
 
 module_platform_driver(stm32_adfsdm_driver);
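
The adfsdm lock closes a window where prepare could restart the IIO callback while shutdown was stopping it; both paths now take the same mutex. The stop side, sketched with hypothetical names:

static void my_iio_stop(struct my_priv *priv)
{
        mutex_lock(&priv->lock);
        if (priv->iio_active) {
                iio_channel_stop_all_cb(priv->iio_cb);
                priv->iio_active = false;
        }
        mutex_unlock(&priv->lock);
}
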
index 47c334de6b0966a4e5fd4a589e8ce6fd757c1fce..8968458eec62d6b796e2eaae3df508e03c375e6d 100644 (file)
@@ -281,7 +281,6 @@ static bool stm32_i2s_readable_reg(struct device *dev, unsigned int reg)
        case STM32_I2S_CFG2_REG:
        case STM32_I2S_IER_REG:
        case STM32_I2S_SR_REG:
-       case STM32_I2S_TXDR_REG:
        case STM32_I2S_RXDR_REG:
        case STM32_I2S_CGFR_REG:
                return true;
@@ -293,7 +292,7 @@ static bool stm32_i2s_readable_reg(struct device *dev, unsigned int reg)
 static bool stm32_i2s_volatile_reg(struct device *dev, unsigned int reg)
 {
        switch (reg) {
-       case STM32_I2S_TXDR_REG:
+       case STM32_I2S_SR_REG:
        case STM32_I2S_RXDR_REG:
                return true;
        default:
index 14c9591aae4260d94f6d19231aaa56ad436e5469..d68d62f12df56098214a94de25758598c9f7502f 100644 (file)
@@ -105,6 +105,7 @@ static int stm32_sai_set_sync(struct stm32_sai_data *sai_client,
        if (!pdev) {
                dev_err(&sai_client->pdev->dev,
                        "Device not found for node %pOFn\n", np_provider);
+               of_node_put(np_provider);
                return -ENODEV;
        }
 
@@ -113,19 +114,20 @@ static int stm32_sai_set_sync(struct stm32_sai_data *sai_client,
                dev_err(&sai_client->pdev->dev,
                        "SAI sync provider data not found\n");
                ret = -EINVAL;
-               goto out_put_dev;
+               goto error;
        }
 
        /* Configure sync client */
        ret = stm32_sai_sync_conf_client(sai_client, synci);
        if (ret < 0)
-               goto out_put_dev;
+               goto error;
 
        /* Configure sync provider */
        ret = stm32_sai_sync_conf_provider(sai_provider, synco);
 
-out_put_dev:
+error:
        put_device(&pdev->dev);
+       of_node_put(np_provider);
        return ret;
 }
 
index f9297228c41ce4412f86d9ba7010ba714d44d345..d7045aa520de56eb42d108a14e92aee810d15f08 100644 (file)
@@ -70,6 +70,7 @@
 #define SAI_IEC60958_STATUS_BYTES      24
 
 #define SAI_MCLK_NAME_LEN              32
+#define SAI_RATE_11K                   11025
 
 /**
  * struct stm32_sai_sub_data - private data of SAI sub block (block A or B)
  * @slot_mask: rx or tx active slots mask. set at init or at runtime
  * @data_size: PCM data width. corresponds to PCM substream width.
  * @spdif_frm_cnt: S/PDIF playback frame counter
- * @snd_aes_iec958: iec958 data
+ * @iec958: iec958 data
  * @ctrl_lock: control lock
+ * @irq_lock: prevent race condition with IRQ
  */
 struct stm32_sai_sub_data {
        struct platform_device *pdev;
@@ -133,6 +135,7 @@ struct stm32_sai_sub_data {
        unsigned int spdif_frm_cnt;
        struct snd_aes_iec958 iec958;
        struct mutex ctrl_lock; /* protect resources accessed by controls */
+       spinlock_t irq_lock; /* used to prevent race condition with IRQ */
 };
 
 enum stm32_sai_fifo_th {
@@ -307,6 +310,25 @@ static int stm32_sai_set_clk_div(struct stm32_sai_sub_data *sai,
        return ret;
 }
 
+static int stm32_sai_set_parent_clock(struct stm32_sai_sub_data *sai,
+                                     unsigned int rate)
+{
+       struct platform_device *pdev = sai->pdev;
+       struct clk *parent_clk = sai->pdata->clk_x8k;
+       int ret;
+
+       if (!(rate % SAI_RATE_11K))
+               parent_clk = sai->pdata->clk_x11k;
+
+       ret = clk_set_parent(sai->sai_ck, parent_clk);
+       if (ret)
+               dev_err(&pdev->dev, " Error %d setting sai_ck parent clock. %s",
+                       ret, ret == -EBUSY ?
+                       "Active stream rates conflict\n" : "\n");
+
+       return ret;
+}
+
 static long stm32_sai_mclk_round_rate(struct clk_hw *hw, unsigned long rate,
                                      unsigned long *prate)
 {
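
stm32_sai_set_parent_clock() centralizes the clock-family choice: rates divisible by 11025 (the 11.025/22.05/44.1/88.2 kHz family) take the x11k parent, everything else the x8k parent. The test in isolation (helper name illustrative):

static struct clk *my_pick_parent(struct stm32_sai_data *pdata,
                                  unsigned int rate)
{
        /* 11025 divides the 11.025 kHz rate family exactly */
        return (rate % 11025) ? pdata->clk_x8k : pdata->clk_x11k;
}
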
@@ -474,8 +496,10 @@ static irqreturn_t stm32_sai_isr(int irq, void *devid)
                status = SNDRV_PCM_STATE_XRUN;
        }
 
-       if (status != SNDRV_PCM_STATE_RUNNING)
+       spin_lock(&sai->irq_lock);
+       if (status != SNDRV_PCM_STATE_RUNNING && sai->substream)
                snd_pcm_stop_xrun(sai->substream);
+       spin_unlock(&sai->irq_lock);
 
        return IRQ_HANDLED;
 }
@@ -486,25 +510,29 @@ static int stm32_sai_set_sysclk(struct snd_soc_dai *cpu_dai,
        struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
        int ret;
 
-       if (dir == SND_SOC_CLOCK_OUT) {
+       if (dir == SND_SOC_CLOCK_OUT && sai->sai_mclk) {
                ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
                                         SAI_XCR1_NODIV,
                                         (unsigned int)~SAI_XCR1_NODIV);
                if (ret < 0)
                        return ret;
 
-               dev_dbg(cpu_dai->dev, "SAI MCLK frequency is %uHz\n", freq);
-               sai->mclk_rate = freq;
+               /* If master clock is used, set parent clock now */
+               ret = stm32_sai_set_parent_clock(sai, freq);
+               if (ret)
+                       return ret;
 
-               if (sai->sai_mclk) {
-                       ret = clk_set_rate_exclusive(sai->sai_mclk,
-                                                    sai->mclk_rate);
-                       if (ret) {
-                               dev_err(cpu_dai->dev,
-                                       "Could not set mclk rate\n");
-                               return ret;
-                       }
+               ret = clk_set_rate_exclusive(sai->sai_mclk, freq);
+               if (ret) {
+                       dev_err(cpu_dai->dev,
+                               ret == -EBUSY ?
+                               "Active streams have incompatible rates" :
+                               "Could not set mclk rate\n");
+                       return ret;
                }
+
+               dev_dbg(cpu_dai->dev, "SAI MCLK frequency is %uHz\n", freq);
+               sai->mclk_rate = freq;
        }
 
        return 0;
@@ -679,8 +707,19 @@ static int stm32_sai_startup(struct snd_pcm_substream *substream,
 {
        struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
        int imr, cr2, ret;
+       unsigned long flags;
 
+       spin_lock_irqsave(&sai->irq_lock, flags);
        sai->substream = substream;
+       spin_unlock_irqrestore(&sai->irq_lock, flags);
+
+       if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) {
+               snd_pcm_hw_constraint_mask64(substream->runtime,
+                                            SNDRV_PCM_HW_PARAM_FORMAT,
+                                            SNDRV_PCM_FMTBIT_S32_LE);
+               snd_pcm_hw_constraint_single(substream->runtime,
+                                            SNDRV_PCM_HW_PARAM_CHANNELS, 2);
+       }
 
        ret = clk_prepare_enable(sai->sai_ck);
        if (ret < 0) {
@@ -898,14 +937,16 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
                                     struct snd_pcm_hw_params *params)
 {
        struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
-       int div = 0;
+       int div = 0, cr1 = 0;
        int sai_clk_rate, mclk_ratio, den;
        unsigned int rate = params_rate(params);
+       int ret;
 
-       if (!(rate % 11025))
-               clk_set_parent(sai->sai_ck, sai->pdata->clk_x11k);
-       else
-               clk_set_parent(sai->sai_ck, sai->pdata->clk_x8k);
+       if (!sai->sai_mclk) {
+               ret = stm32_sai_set_parent_clock(sai, rate);
+               if (ret)
+                       return ret;
+       }
        sai_clk_rate = clk_get_rate(sai->sai_ck);
 
        if (STM_SAI_IS_F4(sai->pdata)) {
@@ -943,13 +984,19 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
                } else {
                        if (sai->mclk_rate) {
                                mclk_ratio = sai->mclk_rate / rate;
-                               if ((mclk_ratio != 512) &&
-                                   (mclk_ratio != 256)) {
+                               if (mclk_ratio == 512) {
+                                       cr1 = SAI_XCR1_OSR;
+                               } else if (mclk_ratio != 256) {
                                        dev_err(cpu_dai->dev,
                                                "Wrong mclk ratio %d\n",
                                                mclk_ratio);
                                        return -EINVAL;
                                }
+
+                               regmap_update_bits(sai->regmap,
+                                                  STM_SAI_CR1_REGX,
+                                                  SAI_XCR1_OSR, cr1);
+
                                div = stm32_sai_get_clk_div(sai, sai_clk_rate,
                                                            sai->mclk_rate);
                                if (div < 0)
@@ -1051,28 +1098,36 @@ static void stm32_sai_shutdown(struct snd_pcm_substream *substream,
                               struct snd_soc_dai *cpu_dai)
 {
        struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
+       unsigned long flags;
 
        regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0);
 
        regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, SAI_XCR1_NODIV,
                           SAI_XCR1_NODIV);
 
-       clk_disable_unprepare(sai->sai_ck);
+       /* Release mclk rate only if rate was actually set */
+       if (sai->mclk_rate) {
+               clk_rate_exclusive_put(sai->sai_mclk);
+               sai->mclk_rate = 0;
+       }
 
-       clk_rate_exclusive_put(sai->sai_mclk);
+       clk_disable_unprepare(sai->sai_ck);
 
+       spin_lock_irqsave(&sai->irq_lock, flags);
        sai->substream = NULL;
+       spin_unlock_irqrestore(&sai->irq_lock, flags);
 }
 
 static int stm32_sai_pcm_new(struct snd_soc_pcm_runtime *rtd,
                             struct snd_soc_dai *cpu_dai)
 {
        struct stm32_sai_sub_data *sai = dev_get_drvdata(cpu_dai->dev);
+       struct snd_kcontrol_new knew = iec958_ctls;
 
        if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) {
                dev_dbg(&sai->pdev->dev, "%s: register iec controls", __func__);
-               return snd_ctl_add(rtd->pcm->card,
-                                  snd_ctl_new1(&iec958_ctls, sai));
+               knew.device = rtd->pcm->device;
+               return snd_ctl_add(rtd->pcm->card, snd_ctl_new1(&knew, sai));
        }
 
        return 0;
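
The change above works because snd_ctl_new1() duplicates the template it is given, so a shallow stack copy is enough to patch in per-instance fields first; without the copy, the const template's .device would stay 0 no matter which PCM device the control belongs to. The idiom, sketched with illustrative names (some_template and private_data are placeholders):

/* Sketch: per-instance copy of a const snd_kcontrol_new template. */
struct snd_kcontrol_new knew = some_template;   /* shallow stack copy */
int err;

knew.device = rtd->pcm->device;                 /* bind to this PCM device */
err = snd_ctl_add(rtd->pcm->card, snd_ctl_new1(&knew, private_data));
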
@@ -1081,7 +1136,7 @@ static int stm32_sai_pcm_new(struct snd_soc_pcm_runtime *rtd,
 static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai)
 {
        struct stm32_sai_sub_data *sai = dev_get_drvdata(cpu_dai->dev);
-       int cr1 = 0, cr1_mask;
+       int cr1 = 0, cr1_mask, ret;
 
        sai->cpu_dai = cpu_dai;
 
@@ -1111,8 +1166,10 @@ static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai)
        /* Configure synchronization */
        if (sai->sync == SAI_SYNC_EXTERNAL) {
                /* Configure synchro client and provider */
-               sai->pdata->set_sync(sai->pdata, sai->np_sync_provider,
-                                    sai->synco, sai->synci);
+               ret = sai->pdata->set_sync(sai->pdata, sai->np_sync_provider,
+                                          sai->synco, sai->synci);
+               if (ret)
+                       return ret;
        }
 
        cr1_mask |= SAI_XCR1_SYNCEN_MASK;
@@ -1392,7 +1449,6 @@ static int stm32_sai_sub_dais_init(struct platform_device *pdev,
        if (!sai->cpu_dai_drv)
                return -ENOMEM;
 
-       sai->cpu_dai_drv->name = dev_name(&pdev->dev);
        if (STM_SAI_IS_PLAYBACK(sai)) {
                memcpy(sai->cpu_dai_drv, &stm32_sai_playback_dai,
                       sizeof(stm32_sai_playback_dai));
@@ -1402,6 +1458,7 @@ static int stm32_sai_sub_dais_init(struct platform_device *pdev,
                       sizeof(stm32_sai_capture_dai));
                sai->cpu_dai_drv->capture.stream_name = sai->cpu_dai_drv->name;
        }
+       sai->cpu_dai_drv->name = dev_name(&pdev->dev);
 
        return 0;
 }
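
The reordering above matters because memcpy() copies the entire template, clobbering any field assigned beforehand. Schematically:

/* Sketch of the bug class fixed above. */
memcpy(drv, &template_dai, sizeof(template_dai)); /* overwrites drv->name */
drv->name = dev_name(&pdev->dev);                 /* so assign it after the copy */
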
@@ -1424,6 +1481,7 @@ static int stm32_sai_sub_probe(struct platform_device *pdev)
 
        sai->pdev = pdev;
        mutex_init(&sai->ctrl_lock);
+       spin_lock_init(&sai->irq_lock);
        platform_set_drvdata(pdev, sai);
 
        sai->pdata = dev_get_drvdata(pdev->dev.parent);
index 7afe8fae49391a1e8e18ed34c39b79260c5a2a47..b61f65bed4e48fc7ce6273992261f4c72e04da43 100644 (file)
@@ -351,12 +351,16 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
 {
        struct usb_device *usbdev = line6->usbdev;
        int ret;
-       unsigned char len;
+       unsigned char *len;
        unsigned count;
 
        if (address > 0xffff || datalen > 0xff)
                return -EINVAL;
 
+       len = kmalloc(sizeof(*len), GFP_KERNEL);
+       if (!len)
+               return -ENOMEM;
+
        /* query the serial number: */
        ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
                              USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
@@ -365,7 +369,7 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
 
        if (ret < 0) {
                dev_err(line6->ifcdev, "read request failed (error %d)\n", ret);
-               return ret;
+               goto exit;
        }
 
        /* Wait for data length. We'll get 0xff until length arrives. */
@@ -375,28 +379,29 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
                ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
                                      USB_TYPE_VENDOR | USB_RECIP_DEVICE |
                                      USB_DIR_IN,
-                                     0x0012, 0x0000, &len, 1,
+                                     0x0012, 0x0000, len, 1,
                                      LINE6_TIMEOUT * HZ);
                if (ret < 0) {
                        dev_err(line6->ifcdev,
                                "receive length failed (error %d)\n", ret);
-                       return ret;
+                       goto exit;
                }
 
-               if (len != 0xff)
+               if (*len != 0xff)
                        break;
        }
 
-       if (len == 0xff) {
+       ret = -EIO;
+       if (*len == 0xff) {
                dev_err(line6->ifcdev, "read failed after %d retries\n",
                        count);
-               return -EIO;
-       } else if (len != datalen) {
+               goto exit;
+       } else if (*len != datalen) {
                /* should be equal or something went wrong */
                dev_err(line6->ifcdev,
                        "length mismatch (expected %d, got %d)\n",
-                       (int)datalen, (int)len);
-               return -EIO;
+                       (int)datalen, (int)*len);
+               goto exit;
        }
 
        /* receive the result: */
@@ -405,12 +410,12 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
                              0x0013, 0x0000, data, datalen,
                              LINE6_TIMEOUT * HZ);
 
-       if (ret < 0) {
+       if (ret < 0)
                dev_err(line6->ifcdev, "read failed (error %d)\n", ret);
-               return ret;
-       }
 
-       return 0;
+exit:
+       kfree(len);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(line6_read_data);
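
This hunk and the related ones below share one theme: buffers passed to usb_control_msg() are handed to the host controller for DMA and therefore must be heap memory, never on-stack variables (with CONFIG_VMAP_STACK the stack may not even be physically contiguous). A minimal sketch of the pattern, using a made-up vendor request:

/* Sketch: usb_control_msg() data must come from a kmalloc'd buffer. */
#include <linux/slab.h>
#include <linux/usb.h>

static int sketch_read_byte(struct usb_device *udev, u8 *out)
{
        u8 *buf = kmalloc(1, GFP_KERNEL);       /* never &stack_var */
        int ret;

        if (!buf)
                return -ENOMEM;

        ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x67,
                              USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
                              0x0012, 0x0000, buf, 1, 1000 /* ms */);
        if (ret >= 0)
                *out = *buf;
        kfree(buf);
        return ret < 0 ? ret : 0;
}
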
 
@@ -422,12 +427,16 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
 {
        struct usb_device *usbdev = line6->usbdev;
        int ret;
-       unsigned char status;
+       unsigned char *status;
        int count;
 
        if (address > 0xffff || datalen > 0xffff)
                return -EINVAL;
 
+       status = kmalloc(sizeof(*status), GFP_KERNEL);
+       if (!status)
+               return -ENOMEM;
+
        ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
                              USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
                              0x0022, address, data, datalen,
@@ -436,7 +445,7 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
        if (ret < 0) {
                dev_err(line6->ifcdev,
                        "write request failed (error %d)\n", ret);
-               return ret;
+               goto exit;
        }
 
        for (count = 0; count < LINE6_READ_WRITE_MAX_RETRIES; count++) {
@@ -447,28 +456,29 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
                                      USB_TYPE_VENDOR | USB_RECIP_DEVICE |
                                      USB_DIR_IN,
                                      0x0012, 0x0000,
-                                     &status, 1, LINE6_TIMEOUT * HZ);
+                                     status, 1, LINE6_TIMEOUT * HZ);
 
                if (ret < 0) {
                        dev_err(line6->ifcdev,
                                "receiving status failed (error %d)\n", ret);
-                       return ret;
+                       goto exit;
                }
 
-               if (status != 0xff)
+               if (*status != 0xff)
                        break;
        }
 
-       if (status == 0xff) {
+       if (*status == 0xff) {
                dev_err(line6->ifcdev, "write failed after %d retries\n",
                        count);
-               return -EIO;
-       } else if (status != 0) {
+               ret = -EIO;
+       } else if (*status != 0) {
                dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
-               return -EIO;
+               ret = -EIO;
        }
-
-       return 0;
+exit:
+       kfree(status);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(line6_write_data);
 
index 36ed9c85c0eb2834769bc9e09e6014c72cfb73ae..5f3c87264e66776049f436b45ce7e44f368db4e8 100644 (file)
@@ -225,28 +225,32 @@ static void podhd_startup_start_workqueue(struct timer_list *t)
 static int podhd_dev_start(struct usb_line6_podhd *pod)
 {
        int ret;
-       u8 init_bytes[8];
+       u8 *init_bytes;
        int i;
        struct usb_device *usbdev = pod->line6.usbdev;
 
+       init_bytes = kmalloc(8, GFP_KERNEL);
+       if (!init_bytes)
+               return -ENOMEM;
+
        ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
                                        0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
                                        0x11, 0,
                                        NULL, 0, LINE6_TIMEOUT * HZ);
        if (ret < 0) {
                dev_err(pod->line6.ifcdev, "read request failed (error %d)\n", ret);
-               return ret;
+               goto exit;
        }
 
        /* NOTE: looks like some kind of ping message */
        ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
                                        USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
                                        0x11, 0x0,
-                                       &init_bytes, 3, LINE6_TIMEOUT * HZ);
+                                       init_bytes, 3, LINE6_TIMEOUT * HZ);
        if (ret < 0) {
                dev_err(pod->line6.ifcdev,
                        "receive length failed (error %d)\n", ret);
-               return ret;
+               goto exit;
        }
 
        pod->firmware_version =
@@ -255,7 +259,7 @@ static int podhd_dev_start(struct usb_line6_podhd *pod)
        for (i = 0; i <= 16; i++) {
                ret = line6_read_data(&pod->line6, 0xf000 + 0x08 * i, init_bytes, 8);
                if (ret < 0)
-                       return ret;
+                       goto exit;
        }
 
        ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
@@ -263,10 +267,9 @@ static int podhd_dev_start(struct usb_line6_podhd *pod)
                                        USB_TYPE_STANDARD | USB_RECIP_DEVICE | USB_DIR_OUT,
                                        1, 0,
                                        NULL, 0, LINE6_TIMEOUT * HZ);
-       if (ret < 0)
-               return ret;
-
-       return 0;
+exit:
+       kfree(init_bytes);
+       return ret;
 }
 
 static void podhd_startup_workqueue(struct work_struct *work)
index f47ba94e6f4a11e0370d7e67c8eef81b46daf6e0..19bee725de00dd46b2f8ade8334574e43c41b646 100644 (file)
@@ -365,16 +365,21 @@ static bool toneport_has_source_select(struct usb_line6_toneport *toneport)
 /*
        Setup Toneport device.
 */
-static void toneport_setup(struct usb_line6_toneport *toneport)
+static int toneport_setup(struct usb_line6_toneport *toneport)
 {
-       u32 ticks;
+       u32 *ticks;
        struct usb_line6 *line6 = &toneport->line6;
        struct usb_device *usbdev = line6->usbdev;
 
+       ticks = kmalloc(sizeof(*ticks), GFP_KERNEL);
+       if (!ticks)
+               return -ENOMEM;
+
        /* sync time on device with host: */
        /* note: 32-bit timestamps overflow in year 2106 */
-       ticks = (u32)ktime_get_real_seconds();
-       line6_write_data(line6, 0x80c6, &ticks, 4);
+       *ticks = (u32)ktime_get_real_seconds();
+       line6_write_data(line6, 0x80c6, ticks, 4);
+       kfree(ticks);
 
        /* enable device: */
        toneport_send_cmd(usbdev, 0x0301, 0x0000);
@@ -389,6 +394,7 @@ static void toneport_setup(struct usb_line6_toneport *toneport)
                toneport_update_led(toneport);
 
        mod_timer(&toneport->timer, jiffies + TONEPORT_PCM_DELAY * HZ);
+       return 0;
 }
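
The year-2106 note above is straightforward arithmetic: an unsigned 32-bit seconds counter wraps 2^32 seconds, roughly 136.1 years, after the 1970 epoch. A standalone check:

/* Sketch: where the "year 2106" in the comment comes from. */
#include <stdio.h>

int main(void)
{
        double years = 4294967296.0 / (365.2425 * 24 * 3600); /* 2^32 s in years */

        printf("u32 seconds wrap after %.1f years -> ~%d\n",
               years, 1970 + (int)years);       /* ~136.1 years -> ~2106 */
        return 0;
}
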
 
 /*
@@ -451,7 +457,9 @@ static int toneport_init(struct usb_line6 *line6,
                        return err;
        }
 
-       toneport_setup(toneport);
+       err = toneport_setup(toneport);
+       if (err)
+               return err;
 
        /* register audio system: */
        return snd_card_register(line6->card);
@@ -463,7 +471,11 @@ static int toneport_init(struct usb_line6 *line6,
 */
 static int toneport_reset_resume(struct usb_interface *interface)
 {
-       toneport_setup(usb_get_intfdata(interface));
+       int err;
+
+       err = toneport_setup(usb_get_intfdata(interface));
+       if (err)
+               return err;
        return line6_resume(interface);
 }
 #endif
index a7f413cb704dc7154c42c5adf29ed242d632cfc2..b14ab512c2ce0d4ef2aceae5d673e6b528e1120c 100644 (file)
@@ -441,7 +441,7 @@ static int shbuf_setup_backstore(struct xen_snd_front_pcm_stream_info *stream,
 {
        int i;
 
-       stream->buffer = alloc_pages_exact(stream->buffer_sz, GFP_KERNEL);
+       stream->buffer = alloc_pages_exact(buffer_sz, GFP_KERNEL);
        if (!stream->buffer)
                return -ENOMEM;
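
The one-line fix above addresses a classic ordering bug: the struct field is presumably only assigned after the allocation succeeds, so sizing the allocation from the field reads a stale value; the size has to come from the function parameter. Sketch of the corrected order (the later field assignment is assumed from context, not shown in the hunk):

/* Sketch: size the allocation from the argument, not the unset field. */
stream->buffer = alloc_pages_exact(buffer_sz, GFP_KERNEL);
if (!stream->buffer)
        return -ENOMEM;
stream->buffer_sz = buffer_sz;  /* the field only becomes valid here */
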
 
index c317d3e6867a3770e0e3f8e4ed679172adef1d71..ea6a255ae61f7268ada806820ecafb258ca6b0eb 100644 (file)
@@ -27,8 +27,6 @@
 #define MAP_NONBLOCK   0x40000
 #define MAP_NORESERVE  0x10000
 #define MAP_POPULATE   0x20000
-#define MAP_PRIVATE    0x02
-#define MAP_SHARED     0x01
 #define MAP_STACK      0x80000
 #define PROT_EXEC      0x4
 #define PROT_GROWSDOWN 0x01000000
diff --git a/tools/arch/arc/include/uapi/asm/unistd.h b/tools/arch/arc/include/uapi/asm/unistd.h
new file mode 100644 (file)
index 0000000..5eafa11
--- /dev/null
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/******** no-legacy-syscalls-ABI ********/
+
+/*
+ * Non-typical guard macro to enable inclusion twice in ARCH sys.c
+ * That is how the Generic syscall wrapper generator works
+ */
+#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
+#define _UAPI_ASM_ARC_UNISTD_H
+
+#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_SYS_EXECVE
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_TIME32_SYSCALLS
+
+#define sys_mmap2 sys_mmap_pgoff
+
+#include <asm-generic/unistd.h>
+
+#define NR_syscalls    __NR_syscalls
+
+/* Generic syscall (fs/filesystems.c) - lost in asm-generic/unistd.h */
+#define __NR_sysfs             (__NR_arch_specific_syscall + 3)
+
+/* ARC specific syscall */
+#define __NR_cacheflush                (__NR_arch_specific_syscall + 0)
+#define __NR_arc_settls                (__NR_arch_specific_syscall + 1)
+#define __NR_arc_gettls                (__NR_arch_specific_syscall + 2)
+#define __NR_arc_usr_cmpxchg   (__NR_arch_specific_syscall + 4)
+
+__SYSCALL(__NR_cacheflush, sys_cacheflush)
+__SYSCALL(__NR_arc_settls, sys_arc_settls)
+__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
+__SYSCALL(__NR_arc_usr_cmpxchg, sys_arc_usr_cmpxchg)
+__SYSCALL(__NR_sysfs, sys_sysfs)
+
+#undef __SYSCALL
+
+#endif
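
The "non-typical guard" above enables the standard generic-syscall-table trick: the header is included once normally, yielding the __NR_* values, and once more with __SYSCALL defined so that each __SYSCALL(nr, entry) line expands into a table initializer. Roughly how an arch's sys.c consumes it (a sketch, not the literal ARC file):

/* Sketch: the second inclusion turns __SYSCALL() lines into table entries. */
#define __SYSCALL(nr, call) [nr] = (void *)(call),

void *sys_call_table[NR_syscalls] = {
        [0 ... NR_syscalls - 1] = (void *)sys_ni_syscall, /* default: -ENOSYS */
#include <asm/unistd.h>         /* re-included with __SYSCALL defined */
};
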
index dae1584cf017f6aa311a5b78c3311b0bf55c2b18..4703d218663a2ad81e7c8d4fd0749bed8199ef4f 100644 (file)
@@ -17,5 +17,7 @@
 
 #define __ARCH_WANT_RENAMEAT
 #define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_TIME32_SYSCALLS
 
 #include <asm-generic/unistd.h>
diff --git a/tools/arch/hexagon/include/uapi/asm/unistd.h b/tools/arch/hexagon/include/uapi/asm/unistd.h
new file mode 100644 (file)
index 0000000..432c4db
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Syscall support for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ *  The kernel pulls this unistd.h in three different ways:
+ *  1.  the "normal" way which gets all the __NR defines
+ *  2.  with __SYSCALL defined to produce function declarations
+ *  3.  with __SYSCALL defined to produce syscall table initialization
+ *  See also:  syscalltab.c
+ */
+
+#define sys_mmap2 sys_mmap_pgoff
+#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_SYS_EXECVE
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_TIME32_SYSCALLS
+
+#include <asm-generic/unistd.h>
index de2206883abc0f83f21e59f9f7edf17e4ce2b85a..c8acaa138d466934a9d9adfd7934ca41da5e3279 100644 (file)
@@ -28,8 +28,6 @@
 #define MAP_NONBLOCK   0x20000
 #define MAP_NORESERVE  0x0400
 #define MAP_POPULATE   0x10000
-#define MAP_PRIVATE    0x002
-#define MAP_SHARED     0x001
 #define MAP_STACK      0x40000
 #define PROT_EXEC      0x04
 #define PROT_GROWSDOWN 0x01000000
index 1bd78758bde9815b067d705fdec7157ca3860b34..f9fd1325f5bda746d3393857f4603ae46d7e845e 100644 (file)
@@ -27,8 +27,6 @@
 #define MAP_NONBLOCK   0x20000
 #define MAP_NORESERVE  0x4000
 #define MAP_POPULATE   0x10000
-#define MAP_PRIVATE    0x02
-#define MAP_SHARED     0x01
 #define MAP_STACK      0x40000
 #define PROT_EXEC      0x4
 #define PROT_GROWSDOWN 0x01000000
index 8c876c166ef27b2c6fa754781fdbb103f2addc54..26ca425f4c2c39515bccee31029b3cada4c73639 100644 (file)
@@ -463,10 +463,12 @@ struct kvm_ppc_cpu_char {
 #define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED      (1ULL << 58)
 #define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF     (1ULL << 57)
 #define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS       (1ULL << 56)
+#define KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST    (1ull << 54)
 
 #define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY      (1ULL << 63)
 #define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR         (1ULL << 62)
 #define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR    (1ULL << 61)
+#define KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE    (1ull << 58)
 
 /* Per-vcpu XICS interrupt controller state */
 #define KVM_REG_PPC_ICP_STATE  (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
diff --git a/tools/arch/riscv/include/uapi/asm/unistd.h b/tools/arch/riscv/include/uapi/asm/unistd.h
new file mode 100644 (file)
index 0000000..0e2eeeb
--- /dev/null
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2018 David Abdurachmanov <david.abdurachmanov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifdef __LP64__
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SET_GET_RLIMIT
+#endif /* __LP64__ */
+
+#include <asm-generic/unistd.h>
+
+/*
+ * Allows the instruction cache to be flushed from userspace.  Despite RISC-V
+ * having a direct 'fence.i' instruction available to userspace (which we
+ * can't trap!), that's not actually viable when running on Linux because the
+ * kernel might schedule a process on another hart.  There is no way for
+ * userspace to handle this without invoking the kernel (as it doesn't know the
+ * thread->hart mappings), so we've defined a RISC-V specific system call to
+ * flush the instruction cache.
+ *
+ * __NR_riscv_flush_icache is defined to flush the instruction cache over an
+ * address range, with the flush applying to either all threads or just the
+ * caller.  We don't currently do anything with the address range, that's just
+ * in there for forwards compatibility.
+ */
+#ifndef __NR_riscv_flush_icache
+#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
+#endif
+__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
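
For illustration, the userspace contract described in the comment could be exercised as below; this is a sketch that assumes the header above is visible to the toolchain, and the flags value 0 (flush visible to all threads) follows from the comment rather than from anything defined here:

/* Sketch: flushing the icache over a range from userspace on RISC-V Linux. */
#include <unistd.h>
#include <sys/syscall.h>

static void sketch_flush_icache(void *start, void *end)
{
        syscall(__NR_riscv_flush_icache, start, end, 0UL /* all threads */);
}
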
index 6d612252471143ee2fa850e6b3c1f13456426afe..981ff94796484426911c41e333b82ef395380caa 100644 (file)
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
 #define X86_FEATURE_AVX512_4VNNIW      (18*32+ 2) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS      (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_TSX_FORCE_ABORT    (18*32+13) /* "" TSX_FORCE_ABORT */
 #define X86_FEATURE_PCONFIG            (18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_SPEC_CTRL          (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP                (18*32+27) /* "" Single Thread Indirect Branch Predictors */
index f0b0c90dd398246eb2882050d69c6b53ccca11af..d213ec5c3766db0dd5176c951b13e5f3c1514cfb 100644 (file)
 
 #define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
 #define VMX_ABORT_LOAD_HOST_PDPTE_FAIL       2
+#define VMX_ABORT_VMCS_CORRUPTED             3
 #define VMX_ABORT_LOAD_HOST_MSR_FAIL         4
 
 #endif /* _UAPIVMX_H */
index 34dde6f44dae517a119e9cff65261753a75b8fdc..f2b08c990afc846ed3901d94cb4cf9b5c1a8d9ed 100644 (file)
@@ -27,8 +27,6 @@
 #define MAP_NONBLOCK   0x20000
 #define MAP_NORESERVE  0x0400
 #define MAP_POPULATE   0x10000
-#define MAP_PRIVATE    0x002
-#define MAP_SHARED     0x001
 #define MAP_STACK      0x40000
 #define PROT_EXEC      0x4
 #define PROT_GROWSDOWN 0x01000000
index e0c650d91784acac01abe568385244b55a60fd30..994a7e0d16fb54a0dbd68cfaaf350b54927418b5 100644 (file)
@@ -1151,6 +1151,9 @@ static int do_create(int argc, char **argv)
                                return -1;
                        }
                        NEXT_ARG();
+               } else {
+                       p_err("unknown arg %s", *argv);
+                       return -1;
                }
        }
 
index 8ef80d65a474f001466c4a8ee940b2bb93859881..d2be5a06c339155f355058946f655f38f97b5db9 100644 (file)
@@ -401,41 +401,31 @@ static int do_show(int argc, char **argv)
 
 static int do_dump(int argc, char **argv)
 {
-       unsigned int finfo_rec_size, linfo_rec_size, jited_linfo_rec_size;
-       void *func_info = NULL, *linfo = NULL, *jited_linfo = NULL;
-       unsigned int nr_finfo, nr_linfo = 0, nr_jited_linfo = 0;
+       struct bpf_prog_info_linear *info_linear;
        struct bpf_prog_linfo *prog_linfo = NULL;
-       unsigned long *func_ksyms = NULL;
-       struct bpf_prog_info info = {};
-       unsigned int *func_lens = NULL;
+       enum {DUMP_JITED, DUMP_XLATED} mode;
        const char *disasm_opt = NULL;
-       unsigned int nr_func_ksyms;
-       unsigned int nr_func_lens;
+       struct bpf_prog_info *info;
        struct dump_data dd = {};
-       __u32 len = sizeof(info);
+       void *func_info = NULL;
        struct btf *btf = NULL;
-       unsigned int buf_size;
        char *filepath = NULL;
        bool opcodes = false;
        bool visual = false;
        char func_sig[1024];
        unsigned char *buf;
        bool linum = false;
-       __u32 *member_len;
-       __u64 *member_ptr;
+       __u32 member_len;
+       __u64 arrays;
        ssize_t n;
-       int err;
        int fd;
 
        if (is_prefix(*argv, "jited")) {
                if (disasm_init())
                        return -1;
-
-               member_len = &info.jited_prog_len;
-               member_ptr = &info.jited_prog_insns;
+               mode = DUMP_JITED;
        } else if (is_prefix(*argv, "xlated")) {
-               member_len = &info.xlated_prog_len;
-               member_ptr = &info.xlated_prog_insns;
+               mode = DUMP_XLATED;
        } else {
                p_err("expected 'xlated' or 'jited', got: %s", *argv);
                return -1;
@@ -474,175 +464,50 @@ static int do_dump(int argc, char **argv)
                return -1;
        }
 
-       err = bpf_obj_get_info_by_fd(fd, &info, &len);
-       if (err) {
-               p_err("can't get prog info: %s", strerror(errno));
-               return -1;
-       }
-
-       if (!*member_len) {
-               p_info("no instructions returned");
-               close(fd);
-               return 0;
-       }
+       if (mode == DUMP_JITED)
+               arrays = 1UL << BPF_PROG_INFO_JITED_INSNS;
+       else
+               arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS;
 
-       buf_size = *member_len;
+       arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS;
+       arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
+       arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
+       arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
+       arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
 
-       buf = malloc(buf_size);
-       if (!buf) {
-               p_err("mem alloc failed");
-               close(fd);
+       info_linear = bpf_program__get_prog_info_linear(fd, arrays);
+       close(fd);
+       if (IS_ERR_OR_NULL(info_linear)) {
+               p_err("can't get prog info: %s", strerror(errno));
                return -1;
        }
 
-       nr_func_ksyms = info.nr_jited_ksyms;
-       if (nr_func_ksyms) {
-               func_ksyms = malloc(nr_func_ksyms * sizeof(__u64));
-               if (!func_ksyms) {
-                       p_err("mem alloc failed");
-                       close(fd);
-                       goto err_free;
-               }
-       }
-
-       nr_func_lens = info.nr_jited_func_lens;
-       if (nr_func_lens) {
-               func_lens = malloc(nr_func_lens * sizeof(__u32));
-               if (!func_lens) {
-                       p_err("mem alloc failed");
-                       close(fd);
+       info = &info_linear->info;
+       if (mode == DUMP_JITED) {
+               if (info->jited_prog_len == 0) {
+                       p_info("no instructions returned");
                        goto err_free;
                }
-       }
-
-       nr_finfo = info.nr_func_info;
-       finfo_rec_size = info.func_info_rec_size;
-       if (nr_finfo && finfo_rec_size) {
-               func_info = malloc(nr_finfo * finfo_rec_size);
-               if (!func_info) {
-                       p_err("mem alloc failed");
-                       close(fd);
+               buf = (unsigned char *)(info->jited_prog_insns);
+               member_len = info->jited_prog_len;
+       } else {        /* DUMP_XLATED */
+               if (info->xlated_prog_len == 0) {
+                       p_err("error retrieving insn dump: kernel.kptr_restrict set?");
                        goto err_free;
                }
+               buf = (unsigned char *)info->xlated_prog_insns;
+               member_len = info->xlated_prog_len;
        }
 
-       linfo_rec_size = info.line_info_rec_size;
-       if (info.nr_line_info && linfo_rec_size && info.btf_id) {
-               nr_linfo = info.nr_line_info;
-               linfo = malloc(nr_linfo * linfo_rec_size);
-               if (!linfo) {
-                       p_err("mem alloc failed");
-                       close(fd);
-                       goto err_free;
-               }
-       }
-
-       jited_linfo_rec_size = info.jited_line_info_rec_size;
-       if (info.nr_jited_line_info &&
-           jited_linfo_rec_size &&
-           info.nr_jited_ksyms &&
-           info.nr_jited_func_lens &&
-           info.btf_id) {
-               nr_jited_linfo = info.nr_jited_line_info;
-               jited_linfo = malloc(nr_jited_linfo * jited_linfo_rec_size);
-               if (!jited_linfo) {
-                       p_err("mem alloc failed");
-                       close(fd);
-                       goto err_free;
-               }
-       }
-
-       memset(&info, 0, sizeof(info));
-
-       *member_ptr = ptr_to_u64(buf);
-       *member_len = buf_size;
-       info.jited_ksyms = ptr_to_u64(func_ksyms);
-       info.nr_jited_ksyms = nr_func_ksyms;
-       info.jited_func_lens = ptr_to_u64(func_lens);
-       info.nr_jited_func_lens = nr_func_lens;
-       info.nr_func_info = nr_finfo;
-       info.func_info_rec_size = finfo_rec_size;
-       info.func_info = ptr_to_u64(func_info);
-       info.nr_line_info = nr_linfo;
-       info.line_info_rec_size = linfo_rec_size;
-       info.line_info = ptr_to_u64(linfo);
-       info.nr_jited_line_info = nr_jited_linfo;
-       info.jited_line_info_rec_size = jited_linfo_rec_size;
-       info.jited_line_info = ptr_to_u64(jited_linfo);
-
-       err = bpf_obj_get_info_by_fd(fd, &info, &len);
-       close(fd);
-       if (err) {
-               p_err("can't get prog info: %s", strerror(errno));
-               goto err_free;
-       }
-
-       if (*member_len > buf_size) {
-               p_err("too many instructions returned");
-               goto err_free;
-       }
-
-       if (info.nr_jited_ksyms > nr_func_ksyms) {
-               p_err("too many addresses returned");
-               goto err_free;
-       }
-
-       if (info.nr_jited_func_lens > nr_func_lens) {
-               p_err("too many values returned");
-               goto err_free;
-       }
-
-       if (info.nr_func_info != nr_finfo) {
-               p_err("incorrect nr_func_info %d vs. expected %d",
-                     info.nr_func_info, nr_finfo);
-               goto err_free;
-       }
-
-       if (info.func_info_rec_size != finfo_rec_size) {
-               p_err("incorrect func_info_rec_size %d vs. expected %d",
-                     info.func_info_rec_size, finfo_rec_size);
-               goto err_free;
-       }
-
-       if (linfo && info.nr_line_info != nr_linfo) {
-               p_err("incorrect nr_line_info %u vs. expected %u",
-                     info.nr_line_info, nr_linfo);
-               goto err_free;
-       }
-
-       if (info.line_info_rec_size != linfo_rec_size) {
-               p_err("incorrect line_info_rec_size %u vs. expected %u",
-                     info.line_info_rec_size, linfo_rec_size);
-               goto err_free;
-       }
-
-       if (jited_linfo && info.nr_jited_line_info != nr_jited_linfo) {
-               p_err("incorrect nr_jited_line_info %u vs. expected %u",
-                     info.nr_jited_line_info, nr_jited_linfo);
-               goto err_free;
-       }
-
-       if (info.jited_line_info_rec_size != jited_linfo_rec_size) {
-               p_err("incorrect jited_line_info_rec_size %u vs. expected %u",
-                     info.jited_line_info_rec_size, jited_linfo_rec_size);
-               goto err_free;
-       }
-
-       if ((member_len == &info.jited_prog_len &&
-            info.jited_prog_insns == 0) ||
-           (member_len == &info.xlated_prog_len &&
-            info.xlated_prog_insns == 0)) {
-               p_err("error retrieving insn dump: kernel.kptr_restrict set?");
-               goto err_free;
-       }
-
-       if (info.btf_id && btf__get_from_id(info.btf_id, &btf)) {
+       if (info->btf_id && btf__get_from_id(info->btf_id, &btf)) {
                p_err("failed to get btf");
                goto err_free;
        }
 
-       if (nr_linfo) {
-               prog_linfo = bpf_prog_linfo__new(&info);
+       func_info = (void *)info->func_info;
+
+       if (info->nr_line_info) {
+               prog_linfo = bpf_prog_linfo__new(info);
                if (!prog_linfo)
                        p_info("error in processing bpf_line_info.  continue without it.");
        }
@@ -655,9 +520,9 @@ static int do_dump(int argc, char **argv)
                        goto err_free;
                }
 
-               n = write(fd, buf, *member_len);
+               n = write(fd, buf, member_len);
                close(fd);
-               if (n != *member_len) {
+               if (n != member_len) {
                        p_err("error writing output file: %s",
                              n < 0 ? strerror(errno) : "short write");
                        goto err_free;
@@ -665,19 +530,19 @@ static int do_dump(int argc, char **argv)
 
                if (json_output)
                        jsonw_null(json_wtr);
-       } else if (member_len == &info.jited_prog_len) {
+       } else if (mode == DUMP_JITED) {
                const char *name = NULL;
 
-               if (info.ifindex) {
-                       name = ifindex_to_bfd_params(info.ifindex,
-                                                    info.netns_dev,
-                                                    info.netns_ino,
+               if (info->ifindex) {
+                       name = ifindex_to_bfd_params(info->ifindex,
+                                                    info->netns_dev,
+                                                    info->netns_ino,
                                                     &disasm_opt);
                        if (!name)
                                goto err_free;
                }
 
-               if (info.nr_jited_func_lens && info.jited_func_lens) {
+               if (info->nr_jited_func_lens && info->jited_func_lens) {
                        struct kernel_sym *sym = NULL;
                        struct bpf_func_info *record;
                        char sym_name[SYM_MAX_NAME];
@@ -685,17 +550,16 @@ static int do_dump(int argc, char **argv)
                        __u64 *ksyms = NULL;
                        __u32 *lens;
                        __u32 i;
-
-                       if (info.nr_jited_ksyms) {
+                       if (info->nr_jited_ksyms) {
                                kernel_syms_load(&dd);
-                               ksyms = (__u64 *) info.jited_ksyms;
+                               ksyms = (__u64 *) info->jited_ksyms;
                        }
 
                        if (json_output)
                                jsonw_start_array(json_wtr);
 
-                       lens = (__u32 *) info.jited_func_lens;
-                       for (i = 0; i < info.nr_jited_func_lens; i++) {
+                       lens = (__u32 *) info->jited_func_lens;
+                       for (i = 0; i < info->nr_jited_func_lens; i++) {
                                if (ksyms) {
                                        sym = kernel_syms_search(&dd, ksyms[i]);
                                        if (sym)
@@ -707,7 +571,7 @@ static int do_dump(int argc, char **argv)
                                }
 
                                if (func_info) {
-                                       record = func_info + i * finfo_rec_size;
+                                       record = func_info + i * info->func_info_rec_size;
                                        btf_dumper_type_only(btf, record->type_id,
                                                             func_sig,
                                                             sizeof(func_sig));
@@ -744,49 +608,37 @@ static int do_dump(int argc, char **argv)
                        if (json_output)
                                jsonw_end_array(json_wtr);
                } else {
-                       disasm_print_insn(buf, *member_len, opcodes, name,
+                       disasm_print_insn(buf, member_len, opcodes, name,
                                          disasm_opt, btf, NULL, 0, 0, false);
                }
        } else if (visual) {
                if (json_output)
                        jsonw_null(json_wtr);
                else
-                       dump_xlated_cfg(buf, *member_len);
+                       dump_xlated_cfg(buf, member_len);
        } else {
                kernel_syms_load(&dd);
-               dd.nr_jited_ksyms = info.nr_jited_ksyms;
-               dd.jited_ksyms = (__u64 *) info.jited_ksyms;
+               dd.nr_jited_ksyms = info->nr_jited_ksyms;
+               dd.jited_ksyms = (__u64 *) info->jited_ksyms;
                dd.btf = btf;
                dd.func_info = func_info;
-               dd.finfo_rec_size = finfo_rec_size;
+               dd.finfo_rec_size = info->func_info_rec_size;
                dd.prog_linfo = prog_linfo;
 
                if (json_output)
-                       dump_xlated_json(&dd, buf, *member_len, opcodes,
+                       dump_xlated_json(&dd, buf, member_len, opcodes,
                                         linum);
                else
-                       dump_xlated_plain(&dd, buf, *member_len, opcodes,
+                       dump_xlated_plain(&dd, buf, member_len, opcodes,
                                          linum);
                kernel_syms_destroy(&dd);
        }
 
-       free(buf);
-       free(func_ksyms);
-       free(func_lens);
-       free(func_info);
-       free(linfo);
-       free(jited_linfo);
-       bpf_prog_linfo__free(prog_linfo);
+       free(info_linear);
        return 0;
 
 err_free:
-       free(buf);
-       free(func_ksyms);
-       free(func_lens);
-       free(func_info);
-       free(linfo);
-       free(jited_linfo);
-       bpf_prog_linfo__free(prog_linfo);
+       free(info_linear);
        return -1;
 }
 
index 61e46d54a67c0dff521e665782b316f92ef9c320..361207387b1b7efc4f6175af0208514f5cd591a8 100644 (file)
@@ -66,7 +66,9 @@ FEATURE_TESTS_BASIC :=                  \
         sched_getcpu                   \
         sdt                            \
         setns                          \
-        libaio
+        libaio                         \
+        libzstd                                \
+        disassembler-four-args
 
 # FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
 # of all feature tests
@@ -118,7 +120,9 @@ FEATURE_DISPLAY ?=              \
          lzma                   \
          get_cpuid              \
          bpf                   \
-         libaio
+         libaio                        \
+         libzstd               \
+         disassembler-four-args
 
 # Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features.
 # If in the future we need per-feature checks/flags for features not
index 7ceb4441b6277729215ea4ea26193e60d0095f2e..4b8244ee65ce65a8e6b4907db53ee326ee057f92 100644 (file)
@@ -62,7 +62,8 @@ FILES=                                          \
          test-clang.bin                                \
          test-llvm.bin                         \
          test-llvm-version.bin                 \
-         test-libaio.bin
+         test-libaio.bin                       \
+         test-libzstd.bin
 
 FILES := $(addprefix $(OUTPUT),$(FILES))
 
@@ -301,6 +302,9 @@ $(OUTPUT)test-clang.bin:
 $(OUTPUT)test-libaio.bin:
        $(BUILD) -lrt
 
+$(OUTPUT)test-libzstd.bin:
+       $(BUILD) -lzstd
+
 ###############################
 
 clean:
index e903b86b742f29d31c383165dab4bec120ee04ae..a59c537050934b0d4bc9b773a7c2ac826dd01233 100644 (file)
 # include "test-reallocarray.c"
 #undef main
 
+#define main main_test_disassembler_four_args
+# include "test-disassembler-four-args.c"
+#undef main
+
+#define main main_test_zstd
+# include "test-libzstd.c"
+#undef main
+
 int main(int argc, char *argv[])
 {
        main_test_libpython();
@@ -219,6 +227,8 @@ int main(int argc, char *argv[])
        main_test_setns();
        main_test_libaio();
        main_test_reallocarray();
+       main_test_disassembler_four_args();
+       main_test_libzstd();
 
        return 0;
 }
index d68eb4fb40cc4261e6ad857d7ad656be0f6e95d6..2b0e02c3887076aa674357bbbf7ca7c24e1e7bbf 100644 (file)
@@ -4,9 +4,9 @@
 /*
  * Check OpenCSD library version is sufficient to provide required features
  */
-#define OCSD_MIN_VER ((0 << 16) | (10 << 8) | (0))
+#define OCSD_MIN_VER ((0 << 16) | (11 << 8) | (0))
 #if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER)
-#error "OpenCSD >= 0.10.0 is required"
+#error "OpenCSD >= 0.11.0 is required"
 #endif
 
 int main(void)
diff --git a/tools/build/feature/test-libzstd.c b/tools/build/feature/test-libzstd.c
new file mode 100644 (file)
index 0000000..55268c0
--- /dev/null
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <zstd.h>
+
+int main(void)
+{
+       ZSTD_CStream    *cstream;
+
+       cstream = ZSTD_createCStream();
+       ZSTD_freeCStream(cstream);
+
+       return 0;
+}
diff --git a/tools/include/uapi/asm-generic/mman-common-tools.h b/tools/include/uapi/asm-generic/mman-common-tools.h
new file mode 100644 (file)
index 0000000..af7d0d3
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
+#define __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
+
+#include <asm-generic/mman-common.h>
+
+/* We need this because tools/include/uapi/ is in the tools header search
+ * path, giving access to definitions not yet in the system's copy of these
+ * files. Since this cset:
+ *
+ *     746c9398f5ac ("arch: move common mmap flags to linux/mman.h")
+ *
+ * the MAP_SHARED and MAP_PRIVATE defines are no longer in our copy of
+ * asm-generic/mman-common.h, so the system's sys/mman.h can no longer find
+ * them. Define them here and include this header from each of the per-arch
+ * mman.h headers.
+ */
+#ifndef MAP_SHARED
+#define MAP_SHARED     0x01            /* Share changes */
+#define MAP_PRIVATE    0x02            /* Changes are private */
+#define MAP_SHARED_VALIDATE 0x03       /* share + validate extension flags */
+#endif
+#endif // __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
index e7ee32861d51d4b2e47b9182a48c05fe837b8d21..abd238d0f7a48d718728cacde7853c60846bc539 100644 (file)
@@ -15,9 +15,7 @@
 #define PROT_GROWSDOWN 0x01000000      /* mprotect flag: extend change to start of growsdown vma */
 #define PROT_GROWSUP   0x02000000      /* mprotect flag: extend change to end of growsup vma */
 
-#define MAP_SHARED     0x01            /* Share changes */
-#define MAP_PRIVATE    0x02            /* Changes are private */
-#define MAP_SHARED_VALIDATE 0x03       /* share + validate extension flags */
+/* 0x01 - 0x03 are defined in linux/mman.h */
 #define MAP_TYPE       0x0f            /* Mask for type of mapping */
 #define MAP_FIXED      0x10            /* Interpret addr exactly */
 #define MAP_ANONYMOUS  0x20            /* don't use a file */
index 653687d9771b9d0e824fe4b25eee079f7fcec1cc..36c197fc44a0d5df08110715f11cb8b3eb6caff7 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef __ASM_GENERIC_MMAN_H
 #define __ASM_GENERIC_MMAN_H
 
-#include <asm-generic/mman-common.h>
+#include <asm-generic/mman-common-tools.h>
 
 #define MAP_GROWSDOWN  0x0100          /* stack-like segment */
 #define MAP_DENYWRITE  0x0800          /* ETXTBSY */
index d90127298f12d1536b7594f7cdebd324f3fe4db3..dee7292e1df6b162a12d0e55e9ccdf875fad428d 100644 (file)
@@ -38,8 +38,10 @@ __SYSCALL(__NR_io_destroy, sys_io_destroy)
 __SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit)
 #define __NR_io_cancel 3
 __SYSCALL(__NR_io_cancel, sys_io_cancel)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_io_getevents 4
-__SC_COMP(__NR_io_getevents, sys_io_getevents, compat_sys_io_getevents)
+__SC_3264(__NR_io_getevents, sys_io_getevents_time32, sys_io_getevents)
+#endif
 
 /* fs/xattr.c */
 #define __NR_setxattr 5
@@ -179,7 +181,7 @@ __SYSCALL(__NR_fchownat, sys_fchownat)
 #define __NR_fchown 55
 __SYSCALL(__NR_fchown, sys_fchown)
 #define __NR_openat 56
-__SC_COMP(__NR_openat, sys_openat, compat_sys_openat)
+__SYSCALL(__NR_openat, sys_openat)
 #define __NR_close 57
 __SYSCALL(__NR_close, sys_close)
 #define __NR_vhangup 58
@@ -222,10 +224,12 @@ __SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
 __SYSCALL(__NR3264_sendfile, sys_sendfile64)
 
 /* fs/select.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_pselect6 72
-__SC_COMP(__NR_pselect6, sys_pselect6, compat_sys_pselect6)
+__SC_COMP_3264(__NR_pselect6, sys_pselect6_time32, sys_pselect6, compat_sys_pselect6_time32)
 #define __NR_ppoll 73
-__SC_COMP(__NR_ppoll, sys_ppoll, compat_sys_ppoll)
+__SC_COMP_3264(__NR_ppoll, sys_ppoll_time32, sys_ppoll, compat_sys_ppoll_time32)
+#endif
 
 /* fs/signalfd.c */
 #define __NR_signalfd4 74
@@ -269,16 +273,20 @@ __SC_COMP(__NR_sync_file_range, sys_sync_file_range, \
 /* fs/timerfd.c */
 #define __NR_timerfd_create 85
 __SYSCALL(__NR_timerfd_create, sys_timerfd_create)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_timerfd_settime 86
-__SC_COMP(__NR_timerfd_settime, sys_timerfd_settime, \
-         compat_sys_timerfd_settime)
+__SC_3264(__NR_timerfd_settime, sys_timerfd_settime32, \
+         sys_timerfd_settime)
 #define __NR_timerfd_gettime 87
-__SC_COMP(__NR_timerfd_gettime, sys_timerfd_gettime, \
-         compat_sys_timerfd_gettime)
+__SC_3264(__NR_timerfd_gettime, sys_timerfd_gettime32, \
+         sys_timerfd_gettime)
+#endif
 
 /* fs/utimes.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_utimensat 88
-__SC_COMP(__NR_utimensat, sys_utimensat, compat_sys_utimensat)
+__SC_3264(__NR_utimensat, sys_utimensat_time32, sys_utimensat)
+#endif
 
 /* kernel/acct.c */
 #define __NR_acct 89
@@ -309,8 +317,10 @@ __SYSCALL(__NR_set_tid_address, sys_set_tid_address)
 __SYSCALL(__NR_unshare, sys_unshare)
 
 /* kernel/futex.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_futex 98
-__SC_COMP(__NR_futex, sys_futex, compat_sys_futex)
+__SC_3264(__NR_futex, sys_futex_time32, sys_futex)
+#endif
 #define __NR_set_robust_list 99
 __SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
          compat_sys_set_robust_list)
@@ -319,8 +329,10 @@ __SC_COMP(__NR_get_robust_list, sys_get_robust_list, \
          compat_sys_get_robust_list)
 
 /* kernel/hrtimer.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_nanosleep 101
-__SC_COMP(__NR_nanosleep, sys_nanosleep, compat_sys_nanosleep)
+__SC_3264(__NR_nanosleep, sys_nanosleep_time32, sys_nanosleep)
+#endif
 
 /* kernel/itimer.c */
 #define __NR_getitimer 102
@@ -341,23 +353,29 @@ __SYSCALL(__NR_delete_module, sys_delete_module)
 /* kernel/posix-timers.c */
 #define __NR_timer_create 107
 __SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_timer_gettime 108
-__SC_COMP(__NR_timer_gettime, sys_timer_gettime, compat_sys_timer_gettime)
+__SC_3264(__NR_timer_gettime, sys_timer_gettime32, sys_timer_gettime)
+#endif
 #define __NR_timer_getoverrun 109
 __SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_timer_settime 110
-__SC_COMP(__NR_timer_settime, sys_timer_settime, compat_sys_timer_settime)
+__SC_3264(__NR_timer_settime, sys_timer_settime32, sys_timer_settime)
+#endif
 #define __NR_timer_delete 111
 __SYSCALL(__NR_timer_delete, sys_timer_delete)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_clock_settime 112
-__SC_COMP(__NR_clock_settime, sys_clock_settime, compat_sys_clock_settime)
+__SC_3264(__NR_clock_settime, sys_clock_settime32, sys_clock_settime)
 #define __NR_clock_gettime 113
-__SC_COMP(__NR_clock_gettime, sys_clock_gettime, compat_sys_clock_gettime)
+__SC_3264(__NR_clock_gettime, sys_clock_gettime32, sys_clock_gettime)
 #define __NR_clock_getres 114
-__SC_COMP(__NR_clock_getres, sys_clock_getres, compat_sys_clock_getres)
+__SC_3264(__NR_clock_getres, sys_clock_getres_time32, sys_clock_getres)
 #define __NR_clock_nanosleep 115
-__SC_COMP(__NR_clock_nanosleep, sys_clock_nanosleep, \
-         compat_sys_clock_nanosleep)
+__SC_3264(__NR_clock_nanosleep, sys_clock_nanosleep_time32, \
+         sys_clock_nanosleep)
+#endif
 
 /* kernel/printk.c */
 #define __NR_syslog 116
@@ -388,9 +406,11 @@ __SYSCALL(__NR_sched_yield, sys_sched_yield)
 __SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
 #define __NR_sched_get_priority_min 126
 __SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_sched_rr_get_interval 127
-__SC_COMP(__NR_sched_rr_get_interval, sys_sched_rr_get_interval, \
-         compat_sys_sched_rr_get_interval)
+__SC_3264(__NR_sched_rr_get_interval, sys_sched_rr_get_interval_time32, \
+         sys_sched_rr_get_interval)
+#endif
 
 /* kernel/signal.c */
 #define __NR_restart_syscall 128
@@ -411,9 +431,11 @@ __SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction)
 __SC_COMP(__NR_rt_sigprocmask, sys_rt_sigprocmask, compat_sys_rt_sigprocmask)
 #define __NR_rt_sigpending 136
 __SC_COMP(__NR_rt_sigpending, sys_rt_sigpending, compat_sys_rt_sigpending)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_rt_sigtimedwait 137
-__SC_COMP(__NR_rt_sigtimedwait, sys_rt_sigtimedwait, \
-         compat_sys_rt_sigtimedwait)
+__SC_COMP_3264(__NR_rt_sigtimedwait, sys_rt_sigtimedwait_time32, \
+         sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time32)
+#endif
 #define __NR_rt_sigqueueinfo 138
 __SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
          compat_sys_rt_sigqueueinfo)
@@ -467,10 +489,15 @@ __SYSCALL(__NR_uname, sys_newuname)
 __SYSCALL(__NR_sethostname, sys_sethostname)
 #define __NR_setdomainname 162
 __SYSCALL(__NR_setdomainname, sys_setdomainname)
+
+#ifdef __ARCH_WANT_SET_GET_RLIMIT
+/* getrlimit and setrlimit are superseded by prlimit64 */
 #define __NR_getrlimit 163
 __SC_COMP(__NR_getrlimit, sys_getrlimit, compat_sys_getrlimit)
 #define __NR_setrlimit 164
 __SC_COMP(__NR_setrlimit, sys_setrlimit, compat_sys_setrlimit)
+#endif
+
 #define __NR_getrusage 165
 __SC_COMP(__NR_getrusage, sys_getrusage, compat_sys_getrusage)
 #define __NR_umask 166
@@ -481,12 +508,14 @@ __SYSCALL(__NR_prctl, sys_prctl)
 __SYSCALL(__NR_getcpu, sys_getcpu)
 
 /* kernel/time.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_gettimeofday 169
 __SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
 #define __NR_settimeofday 170
 __SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
 #define __NR_adjtimex 171
-__SC_COMP(__NR_adjtimex, sys_adjtimex, compat_sys_adjtimex)
+__SC_3264(__NR_adjtimex, sys_adjtimex_time32, sys_adjtimex)
+#endif
 
 /* kernel/timer.c */
 #define __NR_getpid 172
@@ -511,11 +540,13 @@ __SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
 __SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
 #define __NR_mq_unlink 181
 __SYSCALL(__NR_mq_unlink, sys_mq_unlink)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_mq_timedsend 182
-__SC_COMP(__NR_mq_timedsend, sys_mq_timedsend, compat_sys_mq_timedsend)
+__SC_3264(__NR_mq_timedsend, sys_mq_timedsend_time32, sys_mq_timedsend)
 #define __NR_mq_timedreceive 183
-__SC_COMP(__NR_mq_timedreceive, sys_mq_timedreceive, \
-         compat_sys_mq_timedreceive)
+__SC_3264(__NR_mq_timedreceive, sys_mq_timedreceive_time32, \
+         sys_mq_timedreceive)
+#endif
 #define __NR_mq_notify 184
 __SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
 #define __NR_mq_getsetattr 185
@@ -536,8 +567,10 @@ __SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
 __SYSCALL(__NR_semget, sys_semget)
 #define __NR_semctl 191
 __SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_semtimedop 192
-__SC_COMP(__NR_semtimedop, sys_semtimedop, compat_sys_semtimedop)
+__SC_COMP(__NR_semtimedop, sys_semtimedop, sys_semtimedop_time32)
+#endif
 #define __NR_semop 193
 __SYSCALL(__NR_semop, sys_semop)
 
@@ -658,8 +691,10 @@ __SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \
 __SYSCALL(__NR_perf_event_open, sys_perf_event_open)
 #define __NR_accept4 242
 __SYSCALL(__NR_accept4, sys_accept4)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_recvmmsg 243
-__SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg)
+__SC_COMP_3264(__NR_recvmmsg, sys_recvmmsg_time32, sys_recvmmsg, compat_sys_recvmmsg_time32)
+#endif
 
 /*
  * Architectures may provide up to 16 syscalls of their own
@@ -667,8 +702,10 @@ __SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg)
  */
 #define __NR_arch_specific_syscall 244
 
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_wait4 260
 __SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
+#endif
 #define __NR_prlimit64 261
 __SYSCALL(__NR_prlimit64, sys_prlimit64)
 #define __NR_fanotify_init 262
@@ -678,10 +715,11 @@ __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
 #define __NR_name_to_handle_at         264
 __SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
 #define __NR_open_by_handle_at         265
-__SC_COMP(__NR_open_by_handle_at, sys_open_by_handle_at, \
-         compat_sys_open_by_handle_at)
+__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_clock_adjtime 266
-__SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
+__SC_3264(__NR_clock_adjtime, sys_clock_adjtime32, sys_clock_adjtime)
+#endif
 #define __NR_syncfs 267
 __SYSCALL(__NR_syncfs, sys_syncfs)
 #define __NR_setns 268
@@ -734,15 +772,69 @@ __SYSCALL(__NR_pkey_alloc,    sys_pkey_alloc)
 __SYSCALL(__NR_pkey_free,     sys_pkey_free)
 #define __NR_statx 291
 __SYSCALL(__NR_statx,     sys_statx)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_io_pgetevents 292
-__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
+__SC_COMP_3264(__NR_io_pgetevents, sys_io_pgetevents_time32, sys_io_pgetevents, compat_sys_io_pgetevents)
+#endif
 #define __NR_rseq 293
 __SYSCALL(__NR_rseq, sys_rseq)
 #define __NR_kexec_file_load 294
 __SYSCALL(__NR_kexec_file_load,     sys_kexec_file_load)
+/* 295 through 402 are unassigned to sync up with generic numbers, don't use */
+#if __BITS_PER_LONG == 32
+#define __NR_clock_gettime64 403
+__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
+#define __NR_clock_settime64 404
+__SYSCALL(__NR_clock_settime64, sys_clock_settime)
+#define __NR_clock_adjtime64 405
+__SYSCALL(__NR_clock_adjtime64, sys_clock_adjtime)
+#define __NR_clock_getres_time64 406
+__SYSCALL(__NR_clock_getres_time64, sys_clock_getres)
+#define __NR_clock_nanosleep_time64 407
+__SYSCALL(__NR_clock_nanosleep_time64, sys_clock_nanosleep)
+#define __NR_timer_gettime64 408
+__SYSCALL(__NR_timer_gettime64, sys_timer_gettime)
+#define __NR_timer_settime64 409
+__SYSCALL(__NR_timer_settime64, sys_timer_settime)
+#define __NR_timerfd_gettime64 410
+__SYSCALL(__NR_timerfd_gettime64, sys_timerfd_gettime)
+#define __NR_timerfd_settime64 411
+__SYSCALL(__NR_timerfd_settime64, sys_timerfd_settime)
+#define __NR_utimensat_time64 412
+__SYSCALL(__NR_utimensat_time64, sys_utimensat)
+#define __NR_pselect6_time64 413
+__SC_COMP(__NR_pselect6_time64, sys_pselect6, compat_sys_pselect6_time64)
+#define __NR_ppoll_time64 414
+__SC_COMP(__NR_ppoll_time64, sys_ppoll, compat_sys_ppoll_time64)
+#define __NR_io_pgetevents_time64 416
+__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
+#define __NR_recvmmsg_time64 417
+__SC_COMP(__NR_recvmmsg_time64, sys_recvmmsg, compat_sys_recvmmsg_time64)
+#define __NR_mq_timedsend_time64 418
+__SYSCALL(__NR_mq_timedsend_time64, sys_mq_timedsend)
+#define __NR_mq_timedreceive_time64 419
+__SYSCALL(__NR_mq_timedreceive_time64, sys_mq_timedreceive)
+#define __NR_semtimedop_time64 420
+__SYSCALL(__NR_semtimedop_time64, sys_semtimedop)
+#define __NR_rt_sigtimedwait_time64 421
+__SC_COMP(__NR_rt_sigtimedwait_time64, sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time64)
+#define __NR_futex_time64 422
+__SYSCALL(__NR_futex_time64, sys_futex)
+#define __NR_sched_rr_get_interval_time64 423
+__SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval)
+#endif
+
+#define __NR_pidfd_send_signal 424
+__SYSCALL(__NR_pidfd_send_signal, sys_pidfd_send_signal)
+#define __NR_io_uring_setup 425
+__SYSCALL(__NR_io_uring_setup, sys_io_uring_setup)
+#define __NR_io_uring_enter 426
+__SYSCALL(__NR_io_uring_enter, sys_io_uring_enter)
+#define __NR_io_uring_register 427
+__SYSCALL(__NR_io_uring_register, sys_io_uring_register)
 
 #undef __NR_syscalls
-#define __NR_syscalls 295
+#define __NR_syscalls 428
 
 /*
  * 32 bit systems traditionally used different
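
With this renumbering, 32-bit architectures keep their historical time32 syscall numbers behind __ARCH_WANT_TIME32_SYSCALLS and gain y2038-safe *_time64 variants in the 403..423 range, while 424..427 (pidfd_send_signal and the io_uring calls) are shared across all architectures. A minimal sketch of how 32-bit userspace might prefer the new variant and fall back to the legacy call; the wrapper name and the local 64-bit timespec definition are illustrative, not part of this patch:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <time.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    /* 64-bit time layout, mirroring struct __kernel_timespec */
    struct kts64 { long long tv_sec; long long tv_nsec; };

    static int gettime64(clockid_t clk, struct kts64 *ts)
    {
    #ifdef __NR_clock_gettime64
            /* y2038-safe path: syscall 403 on 32-bit architectures */
            if (syscall(__NR_clock_gettime64, clk, ts) == 0)
                    return 0;
            if (errno != ENOSYS)
                    return -1;
    #endif
            /* fall back to the architecture's native clock_gettime */
            {
                    struct timespec now;

                    if (syscall(__NR_clock_gettime, clk, &now) != 0)
                            return -1;
                    ts->tv_sec = now.tv_sec;
                    ts->tv_nsec = now.tv_nsec;
                    return 0;
            }
    }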
index 298b2e197744bbc28782d1a853e1ee3577f02bee..397810fa2d33c95f69770bdf3563ea44213b40c6 100644 (file)
@@ -1486,9 +1486,73 @@ struct drm_i915_gem_context_param {
 #define   I915_CONTEXT_MAX_USER_PRIORITY       1023 /* inclusive */
 #define   I915_CONTEXT_DEFAULT_PRIORITY                0
 #define   I915_CONTEXT_MIN_USER_PRIORITY       -1023 /* inclusive */
+       /*
+        * When using the following param, value should be a pointer to
+        * drm_i915_gem_context_param_sseu.
+        */
+#define I915_CONTEXT_PARAM_SSEU                0x7
        __u64 value;
 };
 
+/**
+ * Context SSEU programming
+ *
+ * It may be necessary for either functional or performance reason to configure
+ * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
+ * Sub-slice/EU).
+ *
+ * This is done by configuring SSEU configuration using the below
+ * @struct drm_i915_gem_context_param_sseu for every supported engine which
+ * userspace intends to use.
+ *
+ * Not all GPUs or engines support this functionality in which case an error
+ * code -ENODEV will be returned.
+ *
+ * Also, flexibility of possible SSEU configuration permutations varies between
+ * GPU generations and software imposed limitations. Requesting such a
+ * combination will return an error code of -EINVAL.
+ *
+ * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
+ * favour of a single global setting.
+ */
+struct drm_i915_gem_context_param_sseu {
+       /*
+        * Engine class & instance to be configured or queried.
+        */
+       __u16 engine_class;
+       __u16 engine_instance;
+
+       /*
+        * Unused for now. Must be cleared to zero.
+        */
+       __u32 flags;
+
+       /*
+        * Mask of slices to enable for the context. Valid values are a subset
+        * of the bitmask value returned for I915_PARAM_SLICE_MASK.
+        */
+       __u64 slice_mask;
+
+       /*
+        * Mask of subslices to enable for the context. Valid values are a
+        * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK.
+        */
+       __u64 subslice_mask;
+
+       /*
+        * Minimum/Maximum number of EUs to enable per subslice for the
+        * context. min_eus_per_subslice must be inferior or equal to
+        * max_eus_per_subslice.
+        */
+       __u16 min_eus_per_subslice;
+       __u16 max_eus_per_subslice;
+
+       /*
+        * Unused for now. Must be cleared to zero.
+        */
+       __u32 rsvd;
+};
+
 enum drm_i915_oa_format {
        I915_OA_FORMAT_A13 = 1,     /* HSW only */
        I915_OA_FORMAT_A29,         /* HSW only */
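
A rough userspace sketch of programming the new parameter; the context id, file descriptor plumbing, and mask values are assumptions for illustration, and valid masks would come from the I915_PARAM_SLICE_MASK / I915_PARAM_SUBSLICE_MASK queries:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Restrict an existing GEM context to a single slice/subslice. */
    static int set_one_slice(int drm_fd, uint32_t ctx_id)
    {
            struct drm_i915_gem_context_param_sseu sseu;
            struct drm_i915_gem_context_param arg;

            memset(&sseu, 0, sizeof(sseu));
            sseu.engine_class = 0;          /* render engine */
            sseu.engine_instance = 0;
            sseu.slice_mask = 0x1;          /* illustrative values */
            sseu.subslice_mask = 0x1;
            sseu.min_eus_per_subslice = 1;
            sseu.max_eus_per_subslice = 8;

            memset(&arg, 0, sizeof(arg));
            arg.ctx_id = ctx_id;
            arg.param = I915_CONTEXT_PARAM_SSEU;
            arg.size = sizeof(sseu);
            arg.value = (uint64_t)(uintptr_t)&sseu;

            /* fails with -ENODEV or -EINVAL per the rules documented above */
            return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
    }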
index 3c38ac9a92a7c4b18cbb4ac49ac60bf887b03d20..929c8e537a14a517c0a3c7ca5b6b15353d622c30 100644 (file)
@@ -502,16 +502,6 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
- *     Description
- *             Push an element *value* in *map*. *flags* is one of:
- *
- *             **BPF_EXIST**
- *             If the queue/stack is full, the oldest element is removed to
- *             make room for this.
- *     Return
- *             0 on success, or a negative error in case of failure.
- *
  * int bpf_probe_read(void *dst, u32 size, const void *src)
  *     Description
  *             For tracing programs, safely attempt to read *size* bytes from
@@ -1435,14 +1425,14 @@ union bpf_attr {
  * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
  *     Description
  *             Equivalent to bpf_get_socket_cookie() helper that accepts
- *             *skb*, but gets socket from **struct bpf_sock_addr** contex.
+ *             *skb*, but gets socket from **struct bpf_sock_addr** context.
  *     Return
  *             A 8-byte long non-decreasing number.
  *
  * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
  *     Description
  *             Equivalent to bpf_get_socket_cookie() helper that accepts
- *             *skb*, but gets socket from **struct bpf_sock_ops** contex.
+ *             *skb*, but gets socket from **struct bpf_sock_ops** context.
  *     Return
  *             A 8-byte long non-decreasing number.
  *
@@ -2098,52 +2088,52 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
+ * int bpf_rc_repeat(void *ctx)
  *     Description
  *             This helper is used in programs implementing IR decoding, to
- *             report a successfully decoded key press with *scancode*,
- *             *toggle* value in the given *protocol*. The scancode will be
- *             translated to a keycode using the rc keymap, and reported as
- *             an input key down event. After a period a key up event is
- *             generated. This period can be extended by calling either
- *             **bpf_rc_keydown**\ () again with the same values, or calling
- *             **bpf_rc_repeat**\ ().
+ *             report a successfully decoded repeat key message. This delays
+ *             the generation of a key up event for previously generated
+ *             key down event.
  *
- *             Some protocols include a toggle bit, in case the button was
- *             released and pressed again between consecutive scancodes.
+ *             Some IR protocols like NEC have a special IR message for
+ *             repeating last button, for when a button is held down.
  *
  *             The *ctx* should point to the lirc sample as passed into
  *             the program.
  *
- *             The *protocol* is the decoded protocol number (see
- *             **enum rc_proto** for some predefined values).
- *
  *             This helper is only available is the kernel was compiled with
  *             the **CONFIG_BPF_LIRC_MODE2** configuration option set to
  *             "**y**".
  *     Return
  *             0
  *
- * int bpf_rc_repeat(void *ctx)
+ * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
  *     Description
  *             This helper is used in programs implementing IR decoding, to
- *             report a successfully decoded repeat key message. This delays
- *             the generation of a key up event for previously generated
- *             key down event.
+ *             report a successfully decoded key press with *scancode*,
+ *             *toggle* value in the given *protocol*. The scancode will be
+ *             translated to a keycode using the rc keymap, and reported as
+ *             an input key down event. After a period a key up event is
+ *             generated. This period can be extended by calling either
+ *             **bpf_rc_keydown**\ () again with the same values, or calling
+ *             **bpf_rc_repeat**\ ().
  *
- *             Some IR protocols like NEC have a special IR message for
- *             repeating last button, for when a button is held down.
+ *             Some protocols include a toggle bit, in case the button was
+ *             released and pressed again between consecutive scancodes.
  *
  *             The *ctx* should point to the lirc sample as passed into
  *             the program.
  *
+ *             The *protocol* is the decoded protocol number (see
+ *             **enum rc_proto** for some predefined values).
+ *
  *             This helper is only available is the kernel was compiled with
  *             the **CONFIG_BPF_LIRC_MODE2** configuration option set to
  *             "**y**".
  *     Return
  *             0
  *
- * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb)
+ * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
  *     Description
  *             Return the cgroup v2 id of the socket associated with the *skb*.
  *             This is roughly similar to the **bpf_get_cgroup_classid**\ ()
@@ -2159,30 +2149,12 @@ union bpf_attr {
  *     Return
  *             The id is returned or 0 in case the id could not be retrieved.
  *
- * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
- *     Description
- *             Return id of cgroup v2 that is ancestor of cgroup associated
- *             with the *skb* at the *ancestor_level*.  The root cgroup is at
- *             *ancestor_level* zero and each step down the hierarchy
- *             increments the level. If *ancestor_level* == level of cgroup
- *             associated with *skb*, then return value will be same as that
- *             of **bpf_skb_cgroup_id**\ ().
- *
- *             The helper is useful to implement policies based on cgroups
- *             that are upper in hierarchy than immediate cgroup associated
- *             with *skb*.
- *
- *             The format of returned id and helper limitations are same as in
- *             **bpf_skb_cgroup_id**\ ().
- *     Return
- *             The id is returned or 0 in case the id could not be retrieved.
- *
  * u64 bpf_get_current_cgroup_id(void)
  *     Return
  *             A 64-bit integer containing the current cgroup id based
  *             on the cgroup within which the current task is running.
  *
- * voidget_local_storage(void *map, u64 flags)
+ * void *bpf_get_local_storage(void *map, u64 flags)
  *     Description
  *             Get the pointer to the local storage area.
  *             The type and the size of the local storage is defined
@@ -2209,6 +2181,24 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
+ * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
+ *     Description
+ *             Return id of cgroup v2 that is ancestor of cgroup associated
+ *             with the *skb* at the *ancestor_level*.  The root cgroup is at
+ *             *ancestor_level* zero and each step down the hierarchy
+ *             increments the level. If *ancestor_level* == level of cgroup
+ *             associated with *skb*, then return value will be same as that
+ *             of **bpf_skb_cgroup_id**\ ().
+ *
+ *             The helper is useful to implement policies based on cgroups
+ *             that are upper in hierarchy than immediate cgroup associated
+ *             with *skb*.
+ *
+ *             The format of returned id and helper limitations are same as in
+ *             **bpf_skb_cgroup_id**\ ().
+ *     Return
+ *             The id is returned or 0 in case the id could not be retrieved.
+ *
  * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
  *     Description
  *             Look for TCP socket matching *tuple*, optionally in a child
@@ -2289,6 +2279,16 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
+ * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
+ *     Description
+ *             Push an element *value* in *map*. *flags* is one of:
+ *
+ *             **BPF_EXIST**
+ *                     If the queue/stack is full, the oldest element is
+ *                     removed to make room for this.
+ *     Return
+ *             0 on success, or a negative error in case of failure.
+ *
  * int bpf_map_pop_elem(struct bpf_map *map, void *value)
  *     Description
  *             Pop an element from *map*.
@@ -2343,29 +2343,94 @@ union bpf_attr {
  *     Return
  *             0
  *
+ * int bpf_spin_lock(struct bpf_spin_lock *lock)
+ *     Description
+ *             Acquire a spinlock represented by the pointer *lock*, which is
+ *             stored as part of a value of a map. Taking the lock allows to
+ *             safely update the rest of the fields in that value. The
+ *             spinlock can (and must) later be released with a call to
+ *             **bpf_spin_unlock**\ (\ *lock*\ ).
+ *
+ *             Spinlocks in BPF programs come with a number of restrictions
+ *             and constraints:
+ *
+ *             * **bpf_spin_lock** objects are only allowed inside maps of
+ *               types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
+ *               list could be extended in the future).
+ *             * BTF description of the map is mandatory.
+ *             * The BPF program can take ONE lock at a time, since taking two
+ *               or more could cause dead locks.
+ *             * Only one **struct bpf_spin_lock** is allowed per map element.
+ *             * When the lock is taken, calls (either BPF to BPF or helpers)
+ *               are not allowed.
+ *             * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
+ *               allowed inside a spinlock-ed region.
+ *             * The BPF program MUST call **bpf_spin_unlock**\ () to release
+ *               the lock, on all execution paths, before it returns.
+ *             * The BPF program can access **struct bpf_spin_lock** only via
+ *               the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
+ *               helpers. Loading or storing data into the **struct
+ *               bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
+ *             * To use the **bpf_spin_lock**\ () helper, the BTF description
+ *               of the map value must be a struct and have **struct
+ *               bpf_spin_lock** *anyname*\ **;** field at the top level.
+ *               Nested lock inside another struct is not allowed.
+ *             * The **struct bpf_spin_lock** *lock* field in a map value must
+ *               be aligned on a multiple of 4 bytes in that value.
+ *             * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
+ *               the **bpf_spin_lock** field to user space.
+ *             * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
+ *               a BPF program, do not update the **bpf_spin_lock** field.
+ *             * **bpf_spin_lock** cannot be on the stack or inside a
+ *               networking packet (it can only be inside of a map values).
+ *             * **bpf_spin_lock** is available to root only.
+ *             * Tracing programs and socket filter programs cannot use
+ *               **bpf_spin_lock**\ () due to insufficient preemption checks
+ *               (but this may change in the future).
+ *             * **bpf_spin_lock** is not allowed in inner maps of map-in-map.
+ *     Return
+ *             0
+ *
+ * int bpf_spin_unlock(struct bpf_spin_lock *lock)
+ *     Description
+ *             Release the *lock* previously locked by a call to
+ *             **bpf_spin_lock**\ (\ *lock*\ ).
+ *     Return
+ *             0
+ *
  * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
  *     Description
  *             This helper gets a **struct bpf_sock** pointer such
- *             that all the fields in bpf_sock can be accessed.
+ *             that all the fields in this **bpf_sock** can be accessed.
  *     Return
- *             A **struct bpf_sock** pointer on success, or NULL in
+ *             A **struct bpf_sock** pointer on success, or **NULL** in
  *             case of failure.
  *
  * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
  *     Description
  *             This helper gets a **struct bpf_tcp_sock** pointer from a
  *             **struct bpf_sock** pointer.
- *
  *     Return
- *             A **struct bpf_tcp_sock** pointer on success, or NULL in
+ *             A **struct bpf_tcp_sock** pointer on success, or **NULL** in
  *             case of failure.
  *
  * int bpf_skb_ecn_set_ce(struct sk_buf *skb)
- *     Description
- *             Sets ECN of IP header to ce (congestion encountered) if
- *             current value is ect (ECN capable). Works with IPv6 and IPv4.
- *     Return
- *             1 if set, 0 if not set.
+ *     Description
+ *             Set ECN (Explicit Congestion Notification) field of IP header
+ *             to **CE** (Congestion Encountered) if current value is **ECT**
+ *             (ECN Capable Transport). Otherwise, do nothing. Works with IPv6
+ *             and IPv4.
+ *     Return
+ *             1 if the **CE** flag is set (either by the current helper call
+ *             or because it was already present), 0 if it is not set.
+ *
+ * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
+ *     Description
+ *             Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
+ *             **bpf_sk_release**\ () is unnecessary and not allowed.
+ *     Return
+ *             A **struct bpf_sock** pointer on success, or **NULL** in
+ *             case of failure.
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
@@ -2465,7 +2530,8 @@ union bpf_attr {
        FN(spin_unlock),                \
        FN(sk_fullsock),                \
        FN(tcp_sock),                   \
-       FN(skb_ecn_set_ce),
+       FN(skb_ecn_set_ce),             \
+       FN(get_listener_sock),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
index 6448cdd9a350d3a0c6513c7a76aad29a68689bd4..a2f8658f1c555235de9fb4581acb113339981bdf 100644 (file)
@@ -41,6 +41,7 @@
 #define F_SEAL_SHRINK  0x0002  /* prevent file from shrinking */
 #define F_SEAL_GROW    0x0004  /* prevent file from growing */
 #define F_SEAL_WRITE   0x0008  /* prevent writes */
+#define F_SEAL_FUTURE_WRITE    0x0010  /* prevent future writes while mapped */
 /* (1U << 31) is reserved for signed error codes */
 
 /*
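
The new F_SEAL_FUTURE_WRITE seal is aimed at one-way shared memory: the creator keeps writing through a mapping it already holds, while anyone who later receives the descriptor can no longer write. A rough sketch of the pattern (error handling elided; needs a memfd-capable libc and headers):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static int make_one_way_shmem(const char *msg)
    {
            int fd = memfd_create("shmem", MFD_ALLOW_SEALING);
            char *p;

            ftruncate(fd, 4096);

            /* the creator's existing writable mapping keeps working... */
            p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            strcpy(p, msg);

            /* ...but from here on, new write() calls and new PROT_WRITE
             * mappings fail for every holder of the fd */
            fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE);
            return fd;
    }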
index a55cb8b10165abcf8a07d8228b590bbc1d8a0c08..e7ad9d350a283d81e89696e1bc42438030accc90 100644 (file)
@@ -292,10 +292,11 @@ struct sockaddr_in {
 #define        IN_LOOPBACK(a)          ((((long int) (a)) & 0xff000000) == 0x7f000000)
 
 /* Defines for Multicast INADDR */
-#define INADDR_UNSPEC_GROUP    0xe0000000U     /* 224.0.0.0   */
-#define INADDR_ALLHOSTS_GROUP  0xe0000001U     /* 224.0.0.1   */
-#define INADDR_ALLRTRS_GROUP    0xe0000002U    /* 224.0.0.2 */
-#define INADDR_MAX_LOCAL_GROUP  0xe00000ffU    /* 224.0.0.255 */
+#define INADDR_UNSPEC_GROUP            0xe0000000U     /* 224.0.0.0   */
+#define INADDR_ALLHOSTS_GROUP          0xe0000001U     /* 224.0.0.1   */
+#define INADDR_ALLRTRS_GROUP           0xe0000002U     /* 224.0.0.2 */
+#define INADDR_ALLSNOOPERS_GROUP       0xe000006aU     /* 224.0.0.106 */
+#define INADDR_MAX_LOCAL_GROUP         0xe00000ffU     /* 224.0.0.255 */
 #endif
 
 /* <asm/byteorder.h> contains the htonl type stuff.. */
index d0f515d53299ea5784ffdb61dd1b829b04fd045c..fc1a64c3447bf6e329bba5e155053332794a0187 100644 (file)
 #define OVERCOMMIT_ALWAYS              1
 #define OVERCOMMIT_NEVER               2
 
+#define MAP_SHARED     0x01            /* Share changes */
+#define MAP_PRIVATE    0x02            /* Changes are private */
+#define MAP_SHARED_VALIDATE 0x03       /* share + validate extension flags */
+
 /*
  * Huge page size encoding when MAP_HUGETLB is specified, and a huge page
  * size other than the default is desired.  See hugetlb_encode.h.
index 404d4b9ffe7644553a1b59fba043b151d935a2e9..df1153cea0b7ee2a27e19682837f81922fef353e 100644 (file)
@@ -32,6 +32,7 @@
 
 #ifndef __KERNEL__
 #include <stdlib.h>
+#include <time.h>
 #endif
 
 /*
index 512306a37531d829b880ebbe841aa87c1949b28f..0f257139b003e117eee62b82c87e2f66a20f8e36 100644 (file)
 #include "liburing.h"
 #include "barrier.h"
 
-#ifndef IOCQE_FLAG_CACHEHIT
-#define IOCQE_FLAG_CACHEHIT    (1U << 0)
-#endif
-
 #define min(a, b)              ((a < b) ? (a) : (b))
 
 struct io_sq_ring {
@@ -85,7 +81,6 @@ struct submitter {
        unsigned long reaps;
        unsigned long done;
        unsigned long calls;
-       unsigned long cachehit, cachemiss;
        volatile int finish;
 
        __s32 *fds;
@@ -270,10 +265,6 @@ static int reap_events(struct submitter *s)
                                return -1;
                        }
                }
-               if (cqe->flags & IOCQE_FLAG_CACHEHIT)
-                       s->cachehit++;
-               else
-                       s->cachemiss++;
                reaped++;
                head++;
        } while (1);
@@ -489,7 +480,7 @@ static void file_depths(char *buf)
 int main(int argc, char *argv[])
 {
        struct submitter *s = &submitters[0];
-       unsigned long done, calls, reap, cache_hit, cache_miss;
+       unsigned long done, calls, reap;
        int err, i, flags, fd;
        char *fdepths;
        void *ret;
@@ -569,44 +560,29 @@ int main(int argc, char *argv[])
        pthread_create(&s->thread, NULL, submitter_fn, s);
 
        fdepths = malloc(8 * s->nr_files);
-       cache_hit = cache_miss = reap = calls = done = 0;
+       reap = calls = done = 0;
        do {
                unsigned long this_done = 0;
                unsigned long this_reap = 0;
                unsigned long this_call = 0;
-               unsigned long this_cache_hit = 0;
-               unsigned long this_cache_miss = 0;
                unsigned long rpc = 0, ipc = 0;
-               double hit = 0.0;
 
                sleep(1);
                this_done += s->done;
                this_call += s->calls;
                this_reap += s->reaps;
-               this_cache_hit += s->cachehit;
-               this_cache_miss += s->cachemiss;
-               if (this_cache_hit && this_cache_miss) {
-                       unsigned long hits, total;
-
-                       hits = this_cache_hit - cache_hit;
-                       total = hits + this_cache_miss - cache_miss;
-                       hit = (double) hits / (double) total;
-                       hit *= 100.0;
-               }
                if (this_call - calls) {
                        rpc = (this_done - done) / (this_call - calls);
                        ipc = (this_reap - reap) / (this_call - calls);
                } else
                        rpc = ipc = -1;
                file_depths(fdepths);
-               printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=%u (%s), Cachehit=%0.2f%%\n",
+               printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=%u (%s)\n",
                                this_done - done, rpc, ipc, s->inflight,
-                               fdepths, hit);
+                               fdepths);
                done = this_done;
                calls = this_call;
                reap = this_reap;
-               cache_hit = s->cachehit;
-               cache_miss = s->cachemiss;
        } while (!finish);
 
        pthread_join(s->thread, &ret);
index 4db74758c6743e2a32800eef302f9cdeddc8a2ee..fecb78afea3feb634750bf823811f1d5a70b3482 100644 (file)
@@ -1,3 +1,4 @@
 libbpf_version.h
 FEATURE-DUMP.libbpf
 test_libbpf
+libbpf.so.*
index 61aaacf0cfa153bd8e798b2a028fa2832cc1ab93..8e7c56e9590fbb0dfa2d76780696206b166ec440 100644 (file)
@@ -3,7 +3,7 @@
 
 BPF_VERSION = 0
 BPF_PATCHLEVEL = 0
-BPF_EXTRAVERSION = 1
+BPF_EXTRAVERSION = 2
 
 MAKEFLAGS += --no-print-directory
 
@@ -79,8 +79,6 @@ export prefix libdir src obj
 libdir_SQ = $(subst ','\'',$(libdir))
 libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
 
-LIB_FILE = libbpf.a libbpf.so
-
 VERSION                = $(BPF_VERSION)
 PATCHLEVEL     = $(BPF_PATCHLEVEL)
 EXTRAVERSION   = $(BPF_EXTRAVERSION)
@@ -88,7 +86,10 @@ EXTRAVERSION = $(BPF_EXTRAVERSION)
 OBJ            = $@
 N              =
 
-LIBBPF_VERSION = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION)
+LIBBPF_VERSION = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION)
+
+LIB_TARGET     = libbpf.a libbpf.so.$(LIBBPF_VERSION)
+LIB_FILE       = libbpf.a libbpf.so*
 
 # Set compile option CFLAGS
 ifdef EXTRA_CFLAGS
@@ -128,16 +129,18 @@ all:
 export srctree OUTPUT CC LD CFLAGS V
 include $(srctree)/tools/build/Makefile.include
 
-BPF_IN    := $(OUTPUT)libbpf-in.o
-LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
-VERSION_SCRIPT := libbpf.map
+BPF_IN         := $(OUTPUT)libbpf-in.o
+VERSION_SCRIPT := libbpf.map
+
+LIB_TARGET     := $(addprefix $(OUTPUT),$(LIB_TARGET))
+LIB_FILE       := $(addprefix $(OUTPUT),$(LIB_FILE))
 
 GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \
                           awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {s++} END{print s}')
 VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \
                              grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
 
-CMD_TARGETS = $(LIB_FILE)
+CMD_TARGETS = $(LIB_TARGET)
 
 CXX_TEST_TARGET = $(OUTPUT)test_libbpf
 
@@ -170,9 +173,13 @@ $(BPF_IN): force elfdep bpfdep
        echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
        $(Q)$(MAKE) $(build)=libbpf
 
-$(OUTPUT)libbpf.so: $(BPF_IN)
-       $(QUIET_LINK)$(CC) --shared -Wl,--version-script=$(VERSION_SCRIPT) \
-               $^ -o $@
+$(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
+
+$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN)
+       $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \
+                                   -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@
+       @ln -sf $(@F) $(OUTPUT)libbpf.so
+       @ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION)
 
 $(OUTPUT)libbpf.a: $(BPF_IN)
        $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
@@ -192,6 +199,12 @@ check_abi: $(OUTPUT)libbpf.so
                exit 1;                                                  \
        fi
 
+define do_install_mkdir
+       if [ ! -d '$(DESTDIR_SQ)$1' ]; then             \
+               $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
+       fi
+endef
+
 define do_install
        if [ ! -d '$(DESTDIR_SQ)$2' ]; then             \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
@@ -200,14 +213,16 @@ define do_install
 endef
 
 install_lib: all_cmd
-       $(call QUIET_INSTALL, $(LIB_FILE)) \
-               $(call do_install,$(LIB_FILE),$(libdir_SQ))
+       $(call QUIET_INSTALL, $(LIB_TARGET)) \
+               $(call do_install_mkdir,$(libdir_SQ)); \
+               cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)
 
 install_headers:
        $(call QUIET_INSTALL, headers) \
                $(call do_install,bpf.h,$(prefix)/include/bpf,644); \
-               $(call do_install,libbpf.h,$(prefix)/include/bpf,644);
-               $(call do_install,btf.h,$(prefix)/include/bpf,644);
+               $(call do_install,libbpf.h,$(prefix)/include/bpf,644); \
+               $(call do_install,btf.h,$(prefix)/include/bpf,644); \
+               $(call do_install,xsk.h,$(prefix)/include/bpf,644);
 
 install: install_lib
 
@@ -219,7 +234,7 @@ config-clean:
 
 clean:
        $(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \
-               *.o *~ *.a *.so .*.d .*.cmd LIBBPF-CFLAGS
+               *.o *~ *.a *.so *.so.$(VERSION) .*.d .*.cmd LIBBPF-CFLAGS
        $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
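
The net effect of the rules above is a conventionally versioned shared object: the real DSO is linked as libbpf.so.0.0.2 with an ELF soname of libbpf.so.0 (from -Wl,-soname,libbpf.so.$(VERSION)), and the two ln -sf calls leave the build directory looking roughly like:

    libbpf.so       -> libbpf.so.0.0.2   (link-time name)
    libbpf.so.0     -> libbpf.so.0.0.2   (runtime soname)
    libbpf.so.0.0.2                      (the actual shared object)

install_lib then copies all three via the libbpf.so* glob, so installed consumers resolve the soname the same way they would in the build tree.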
 
 
index 5788479384cad11141cb77b1295eea26ca91bc09..cef7b77eab69507bcafa8cfe99b68cabb4f4ac78 100644 (file)
@@ -111,6 +111,7 @@ starting from ``0.0.1``.
 
 Every time ABI is being changed, e.g. because a new symbol is added or
 semantic of existing symbol is changed, ABI version should be bumped.
+This bump in ABI version is at most once per kernel development cycle.
 
 For example, if current state of ``libbpf.map`` is:
 
index 1b8d8cdd35750f7b9fc4f2fffb044f300e6bade9..cf119c9b6f2700e790ec02e16b524f6b3a4cf581 100644 (file)
@@ -1602,16 +1602,12 @@ static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
 /* Calculate type signature hash of ENUM. */
 static __u32 btf_hash_enum(struct btf_type *t)
 {
-       struct btf_enum *member = (struct btf_enum *)(t + 1);
-       __u32 vlen = BTF_INFO_VLEN(t->info);
-       __u32 h = btf_hash_common(t);
-       int i;
+       __u32 h;
 
-       for (i = 0; i < vlen; i++) {
-               h = hash_combine(h, member->name_off);
-               h = hash_combine(h, member->val);
-               member++;
-       }
+       /* don't hash vlen and enum members to support enum fwd resolving */
+       h = hash_combine(0, t->name_off);
+       h = hash_combine(h, t->info & ~0xffff);
+       h = hash_combine(h, t->size);
        return h;
 }
 
@@ -1637,6 +1633,22 @@ static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
        return true;
 }
 
+static inline bool btf_is_enum_fwd(struct btf_type *t)
+{
+       return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM &&
+              BTF_INFO_VLEN(t->info) == 0;
+}
+
+static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
+{
+       if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
+               return btf_equal_enum(t1, t2);
+       /* ignore vlen when comparing */
+       return t1->name_off == t2->name_off &&
+              (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
+              t1->size == t2->size;
+}
+
 /*
  * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
  * as referenced type IDs equivalence is established separately during type
@@ -1860,6 +1872,17 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
                                new_id = cand_node->type_id;
                                break;
                        }
+                       if (d->opts.dont_resolve_fwds)
+                               continue;
+                       if (btf_compat_enum(t, cand)) {
+                               if (btf_is_enum_fwd(t)) {
+                                       /* resolve fwd to full enum */
+                                       new_id = cand_node->type_id;
+                                       break;
+                               }
+                               /* resolve canonical enum fwd to full enum */
+                               d->map[cand_node->type_id] = type_id;
+                       }
                }
                break;
 
@@ -2084,7 +2107,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
                return fwd_kind == real_kind;
        }
 
-       if (cand_type->info != canon_type->info)
+       if (cand_kind != canon_kind)
                return 0;
 
        switch (cand_kind) {
@@ -2092,7 +2115,10 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
                return btf_equal_int(cand_type, canon_type);
 
        case BTF_KIND_ENUM:
-               return btf_equal_enum(cand_type, canon_type);
+               if (d->opts.dont_resolve_fwds)
+                       return btf_equal_enum(cand_type, canon_type);
+               else
+                       return btf_compat_enum(cand_type, canon_type);
 
        case BTF_KIND_FWD:
                return btf_equal_common(cand_type, canon_type);
@@ -2103,6 +2129,8 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
        case BTF_KIND_PTR:
        case BTF_KIND_TYPEDEF:
        case BTF_KIND_FUNC:
+               if (cand_type->info != canon_type->info)
+                       return 0;
                return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
 
        case BTF_KIND_ARRAY: {
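
btf_compat_enum() and btf_is_enum_fwd() exist because C (as a common compiler extension) lets one translation unit see only an incomplete enum declaration while another defines it, so BTF ends up with a vlen == 0 "forward" enum and a full enum sharing the same name and size. An illustrative pair, not taken from the patch:

    /* a.c: only an incomplete declaration is visible; BTF records an
     * ENUM with vlen == 0 for it */
    enum x;
    void consume(enum x *p);

    /* b.c: the full definition; same name and size, but vlen == 2 and
     * a member list */
    enum x { X_FIRST = 1, X_SECOND = 2 };

Hashing only the name, kind bits, and size lets both land in the same bucket, and the dedup loop above then remaps the forward copy onto the full definition; the d->map[cand_node->type_id] = type_id assignment handles the case where the canonical type happens to be the forward one.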
index d5b830d606010ad33fd780a514e3684f07d92506..11c25d9ea43124fc6e67fab5a7dc8b93f5dc9e4a 100644 (file)
@@ -112,6 +112,11 @@ void libbpf_print(enum libbpf_print_level level, const char *format, ...)
 # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
 #endif
 
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+       return (__u64) (unsigned long) ptr;
+}
+
 struct bpf_capabilities {
        /* v4.14: kernel support for program & map names. */
        __u32 name:1;
@@ -622,7 +627,7 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
        bool strict = !(flags & MAPS_RELAX_COMPAT);
        int i, map_idx, map_def_sz, nr_maps = 0;
        Elf_Scn *scn;
-       Elf_Data *data;
+       Elf_Data *data = NULL;
        Elf_Data *symbols = obj->efile.symbols;
 
        if (obj->efile.maps_shndx < 0)
@@ -835,12 +840,19 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
                        obj->efile.maps_shndx = idx;
                else if (strcmp(name, BTF_ELF_SEC) == 0) {
                        obj->btf = btf__new(data->d_buf, data->d_size);
-                       if (IS_ERR(obj->btf) || btf__load(obj->btf)) {
+                       if (IS_ERR(obj->btf)) {
                                pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
                                           BTF_ELF_SEC, PTR_ERR(obj->btf));
-                               if (!IS_ERR(obj->btf))
-                                       btf__free(obj->btf);
                                obj->btf = NULL;
+                               continue;
+                       }
+                       err = btf__load(obj->btf);
+                       if (err) {
+                               pr_warning("Error loading %s into kernel: %d. Ignored and continue.\n",
+                                          BTF_ELF_SEC, err);
+                               btf__free(obj->btf);
+                               obj->btf = NULL;
+                               err = 0;
                        }
                } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
                        btf_ext_data = data;
@@ -2999,3 +3011,249 @@ bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
        ring_buffer_write_tail(header, data_tail);
        return ret;
 }
+
+struct bpf_prog_info_array_desc {
+       int     array_offset;   /* e.g. offset of jited_prog_insns */
+       int     count_offset;   /* e.g. offset of jited_prog_len */
+       int     size_offset;    /* > 0: offset of rec size,
+                                * < 0: fix size of -size_offset
+                                */
+};
+
+static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
+       [BPF_PROG_INFO_JITED_INSNS] = {
+               offsetof(struct bpf_prog_info, jited_prog_insns),
+               offsetof(struct bpf_prog_info, jited_prog_len),
+               -1,
+       },
+       [BPF_PROG_INFO_XLATED_INSNS] = {
+               offsetof(struct bpf_prog_info, xlated_prog_insns),
+               offsetof(struct bpf_prog_info, xlated_prog_len),
+               -1,
+       },
+       [BPF_PROG_INFO_MAP_IDS] = {
+               offsetof(struct bpf_prog_info, map_ids),
+               offsetof(struct bpf_prog_info, nr_map_ids),
+               -(int)sizeof(__u32),
+       },
+       [BPF_PROG_INFO_JITED_KSYMS] = {
+               offsetof(struct bpf_prog_info, jited_ksyms),
+               offsetof(struct bpf_prog_info, nr_jited_ksyms),
+               -(int)sizeof(__u64),
+       },
+       [BPF_PROG_INFO_JITED_FUNC_LENS] = {
+               offsetof(struct bpf_prog_info, jited_func_lens),
+               offsetof(struct bpf_prog_info, nr_jited_func_lens),
+               -(int)sizeof(__u32),
+       },
+       [BPF_PROG_INFO_FUNC_INFO] = {
+               offsetof(struct bpf_prog_info, func_info),
+               offsetof(struct bpf_prog_info, nr_func_info),
+               offsetof(struct bpf_prog_info, func_info_rec_size),
+       },
+       [BPF_PROG_INFO_LINE_INFO] = {
+               offsetof(struct bpf_prog_info, line_info),
+               offsetof(struct bpf_prog_info, nr_line_info),
+               offsetof(struct bpf_prog_info, line_info_rec_size),
+       },
+       [BPF_PROG_INFO_JITED_LINE_INFO] = {
+               offsetof(struct bpf_prog_info, jited_line_info),
+               offsetof(struct bpf_prog_info, nr_jited_line_info),
+               offsetof(struct bpf_prog_info, jited_line_info_rec_size),
+       },
+       [BPF_PROG_INFO_PROG_TAGS] = {
+               offsetof(struct bpf_prog_info, prog_tags),
+               offsetof(struct bpf_prog_info, nr_prog_tags),
+               -(int)sizeof(__u8) * BPF_TAG_SIZE,
+       },
+
+};
+
+static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset)
+{
+       __u32 *array = (__u32 *)info;
+
+       if (offset >= 0)
+               return array[offset / sizeof(__u32)];
+       return -(int)offset;
+}
+
+static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset)
+{
+       __u64 *array = (__u64 *)info;
+
+       if (offset >= 0)
+               return array[offset / sizeof(__u64)];
+       return -(int)offset;
+}
+
+static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
+                                        __u32 val)
+{
+       __u32 *array = (__u32 *)info;
+
+       if (offset >= 0)
+               array[offset / sizeof(__u32)] = val;
+}
+
+static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
+                                        __u64 val)
+{
+       __u64 *array = (__u64 *)info;
+
+       if (offset >= 0)
+               array[offset / sizeof(__u64)] = val;
+}
+
+struct bpf_prog_info_linear *
+bpf_program__get_prog_info_linear(int fd, __u64 arrays)
+{
+       struct bpf_prog_info_linear *info_linear;
+       struct bpf_prog_info info = {};
+       __u32 info_len = sizeof(info);
+       __u32 data_len = 0;
+       int i, err;
+       void *ptr;
+
+       if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
+               return ERR_PTR(-EINVAL);
+
+       /* step 1: get array dimensions */
+       err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+       if (err) {
+               pr_debug("can't get prog info: %s", strerror(errno));
+               return ERR_PTR(-EFAULT);
+       }
+
+       /* step 2: calculate total size of all arrays */
+       for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
+               bool include_array = (arrays & (1UL << i)) > 0;
+               struct bpf_prog_info_array_desc *desc;
+               __u32 count, size;
+
+               desc = bpf_prog_info_array_desc + i;
+
+               /* kernel is too old to support this field */
+               if (info_len < desc->array_offset + sizeof(__u32) ||
+                   info_len < desc->count_offset + sizeof(__u32) ||
+                   (desc->size_offset > 0 && info_len < desc->size_offset))
+                       include_array = false;
+
+               if (!include_array) {
+                       arrays &= ~(1UL << i);  /* clear the bit */
+                       continue;
+               }
+
+               count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
+               size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
+
+               data_len += count * size;
+       }
+
+       /* step 3: allocate continuous memory */
+       data_len = roundup(data_len, sizeof(__u64));
+       info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
+       if (!info_linear)
+               return ERR_PTR(-ENOMEM);
+
+       /* step 4: fill data to info_linear->info */
+       info_linear->arrays = arrays;
+       memset(&info_linear->info, 0, sizeof(info));
+       ptr = info_linear->data;
+
+       for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
+               struct bpf_prog_info_array_desc *desc;
+               __u32 count, size;
+
+               if ((arrays & (1UL << i)) == 0)
+                       continue;
+
+               desc  = bpf_prog_info_array_desc + i;
+               count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
+               size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
+               bpf_prog_info_set_offset_u32(&info_linear->info,
+                                            desc->count_offset, count);
+               bpf_prog_info_set_offset_u32(&info_linear->info,
+                                            desc->size_offset, size);
+               bpf_prog_info_set_offset_u64(&info_linear->info,
+                                            desc->array_offset,
+                                            ptr_to_u64(ptr));
+               ptr += count * size;
+       }
+
+       /* step 5: call syscall again to get required arrays */
+       err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
+       if (err) {
+               pr_debug("can't get prog info: %s", strerror(errno));
+               free(info_linear);
+               return ERR_PTR(-EFAULT);
+       }
+
+       /* step 6: verify the data */
+       for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
+               struct bpf_prog_info_array_desc *desc;
+               __u32 v1, v2;
+
+               if ((arrays & (1UL << i)) == 0)
+                       continue;
+
+               desc = bpf_prog_info_array_desc + i;
+               v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
+               v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
+                                                  desc->count_offset);
+               if (v1 != v2)
+                       pr_warning("%s: mismatch in element count\n", __func__);
+
+               v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
+               v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
+                                                  desc->size_offset);
+               if (v1 != v2)
+                       pr_warning("%s: mismatch in rec size\n", __func__);
+       }
+
+       /* step 7: update info_len and data_len */
+       info_linear->info_len = sizeof(struct bpf_prog_info);
+       info_linear->data_len = data_len;
+
+       return info_linear;
+}
+
+void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
+{
+       int i;
+
+       for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
+               struct bpf_prog_info_array_desc *desc;
+               __u64 addr, offs;
+
+               if ((info_linear->arrays & (1UL << i)) == 0)
+                       continue;
+
+               desc = bpf_prog_info_array_desc + i;
+               addr = bpf_prog_info_read_offset_u64(&info_linear->info,
+                                                    desc->array_offset);
+               offs = addr - ptr_to_u64(info_linear->data);
+               bpf_prog_info_set_offset_u64(&info_linear->info,
+                                            desc->array_offset, offs);
+       }
+}
+
+void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
+{
+       int i;
+
+       for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
+               struct bpf_prog_info_array_desc *desc;
+               __u64 addr, offs;
+
+               if ((info_linear->arrays & (1UL << i)) == 0)
+                       continue;
+
+               desc = bpf_prog_info_array_desc + i;
+               offs = bpf_prog_info_read_offset_u64(&info_linear->info,
+                                                    desc->array_offset);
+               addr = offs + ptr_to_u64(info_linear->data);
+               bpf_prog_info_set_offset_u64(&info_linear->info,
+                                            desc->array_offset, addr);
+       }
+}
index b4652aa1a58adf6d8a42c70467d31e5d434f74f4..c70785cc8ef560165e323abcf9e83f8cc05eec31 100644 (file)
@@ -10,6 +10,7 @@
 #ifndef __LIBBPF_LIBBPF_H
 #define __LIBBPF_LIBBPF_H
 
+#include <stdarg.h>
 #include <stdio.h>
 #include <stdint.h>
 #include <stdbool.h>
@@ -377,6 +378,69 @@ LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex);
 LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id,
                                 enum bpf_prog_type prog_type, __u32 ifindex);
 
+/*
+ * Get bpf_prog_info in continuous memory
+ *
+ * struct bpf_prog_info has multiple arrays. The user has option to choose
+ * arrays to fetch from kernel. The following APIs provide an uniform way to
+ * fetch these data. All arrays in bpf_prog_info are stored in a single
+ * continuous memory region. This makes it easy to store the info in a
+ * file.
+ *
+ * Before writing bpf_prog_info_linear to files, it is necessary to
+ * translate pointers in bpf_prog_info to offsets. Helper functions
+ * bpf_program__bpil_addr_to_offs() and bpf_program__bpil_offs_to_addr()
+ * are introduced to switch between pointers and offsets.
+ *
+ * Examples:
+ *   # To fetch map_ids and prog_tags:
+ *   __u64 arrays = (1UL << BPF_PROG_INFO_MAP_IDS) |
+ *           (1UL << BPF_PROG_INFO_PROG_TAGS);
+ *   struct bpf_prog_info_linear *info_linear =
+ *           bpf_program__get_prog_info_linear(fd, arrays);
+ *
+ *   # To save data in file
+ *   bpf_program__bpil_addr_to_offs(info_linear);
+ *   write(f, info_linear, sizeof(*info_linear) + info_linear->data_len);
+ *
+ *   # To read data from file
+ *   read(f, info_linear, <proper_size>);
+ *   bpf_program__bpil_offs_to_addr(info_linear);
+ */
+enum bpf_prog_info_array {
+       BPF_PROG_INFO_FIRST_ARRAY = 0,
+       BPF_PROG_INFO_JITED_INSNS = 0,
+       BPF_PROG_INFO_XLATED_INSNS,
+       BPF_PROG_INFO_MAP_IDS,
+       BPF_PROG_INFO_JITED_KSYMS,
+       BPF_PROG_INFO_JITED_FUNC_LENS,
+       BPF_PROG_INFO_FUNC_INFO,
+       BPF_PROG_INFO_LINE_INFO,
+       BPF_PROG_INFO_JITED_LINE_INFO,
+       BPF_PROG_INFO_PROG_TAGS,
+       BPF_PROG_INFO_LAST_ARRAY,
+};
+
+struct bpf_prog_info_linear {
+       /* size of struct bpf_prog_info, when the tool is compiled */
+       __u32                   info_len;
+       /* total bytes allocated for data, round up to 8 bytes */
+       __u32                   data_len;
+       /* which arrays are included in data */
+       __u64                   arrays;
+       struct bpf_prog_info    info;
+       __u8                    data[];
+};
+
+LIBBPF_API struct bpf_prog_info_linear *
+bpf_program__get_prog_info_linear(int fd, __u64 arrays);
+
+LIBBPF_API void
+bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear);
+
+LIBBPF_API void
+bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);
+
 #ifdef __cplusplus
 } /* extern "C" */
 #endif
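
A short consumer sketch for the pattern shown in the comment block above; the prog_fd plumbing is assumed, and freeing with free() matches the single malloc() the implementation performs:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <bpf/libbpf.h>

    static void dump_map_ids(int prog_fd)
    {
            __u64 arrays = 1UL << BPF_PROG_INFO_MAP_IDS;
            struct bpf_prog_info_linear *il;
            __u32 i, *ids;

            il = bpf_program__get_prog_info_linear(prog_fd, arrays);
            if (libbpf_get_error(il))   /* errors come back ERR_PTR-encoded */
                    return;

            ids = (__u32 *)(uintptr_t)il->info.map_ids;
            for (i = 0; i < il->info.nr_map_ids; i++)
                    printf("map id: %u\n", ids[i]);

            free(il);   /* one contiguous allocation holds info + arrays */
    }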
index 778a26702a707882e126d7d347a13250d79d70a7..f3ce50500cf2985edae7202b5dbf09b63c402d3f 100644 (file)
@@ -153,4 +153,7 @@ LIBBPF_0.0.2 {
                xsk_socket__delete;
                xsk_umem__fd;
                xsk_socket__fd;
+               bpf_program__get_prog_info_linear;
+               bpf_program__bpil_addr_to_offs;
+               bpf_program__bpil_offs_to_addr;
 } LIBBPF_0.0.1;
index f98ac82c9aea51fa5e0d9314c8673f0c0771e0a9..8d0078b65486f45730f3d967c84a6709afb23cdb 100644 (file)
@@ -126,8 +126,8 @@ static void xsk_set_umem_config(struct xsk_umem_config *cfg,
        cfg->frame_headroom = usr_cfg->frame_headroom;
 }
 
-static void xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
-                                     const struct xsk_socket_config *usr_cfg)
+static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
+                                    const struct xsk_socket_config *usr_cfg)
 {
        if (!usr_cfg) {
                cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
@@ -135,14 +135,19 @@ static void xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
                cfg->libbpf_flags = 0;
                cfg->xdp_flags = 0;
                cfg->bind_flags = 0;
-               return;
+               return 0;
        }
 
+       if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)
+               return -EINVAL;
+
        cfg->rx_size = usr_cfg->rx_size;
        cfg->tx_size = usr_cfg->tx_size;
        cfg->libbpf_flags = usr_cfg->libbpf_flags;
        cfg->xdp_flags = usr_cfg->xdp_flags;
        cfg->bind_flags = usr_cfg->bind_flags;
+
+       return 0;
 }
 
 int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
@@ -557,7 +562,9 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
        }
        strncpy(xsk->ifname, ifname, IFNAMSIZ);
 
-       xsk_set_xdp_socket_config(&xsk->config, usr_config);
+       err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
+       if (err)
+               goto out_socket;
 
        if (rx) {
                err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
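
From the caller's side, the only accepted bit in libbpf_flags is currently XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD, and anything else now fails the create call up front. A rough sketch, where the umem, rings, interface name, and queue id are assumptions:

    #include <bpf/xsk.h>

    static int open_xsk(struct xsk_umem *umem, struct xsk_ring_cons *rx,
                        struct xsk_ring_prod *tx, struct xsk_socket **out)
    {
            struct xsk_socket_config cfg = {
                    .rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
                    .tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
                    /* ask libbpf not to load its default XDP program */
                    .libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD,
                    .xdp_flags = 0,
                    .bind_flags = 0,
            };

            /* any unknown libbpf_flags bit now returns -EINVAL here */
            return xsk_socket__create(out, "eth0", /*queue_id=*/0,
                                      umem, rx, tx, &cfg);
    }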
index d463761a58f4359fa27c3d755a7673f8064384f8..988587840c801eac49391d318e281589aad97e35 100644 (file)
@@ -8,6 +8,22 @@
 #include "event-parse-local.h"
 #include "event-utils.h"
 
+/**
+ * tep_get_event - returns the event with the given index
+ * @tep: a handle to the tep_handle
+ * @index: index of the requested event, in the range 0 .. nr_events
+ *
+ * This returns pointer to the element of the events array with the given index
+ * If @tep is NULL, or @index is not in the range 0 .. nr_events, NULL is returned.
+ */
+struct tep_event *tep_get_event(struct tep_handle *tep, int index)
+{
+       if (tep && tep->events && index < tep->nr_events)
+               return tep->events[index];
+
+       return NULL;
+}
+
 /**
  * tep_get_first_event - returns the first event in the events array
  * @tep: a handle to the tep_handle
  */
 struct tep_event *tep_get_first_event(struct tep_handle *tep)
 {
-       if (tep && tep->events)
-               return tep->events[0];
-
-       return NULL;
+       return tep_get_event(tep, 0);
 }
 
 /**
@@ -32,7 +45,7 @@ struct tep_event *tep_get_first_event(struct tep_handle *tep)
  */
 int tep_get_events_count(struct tep_handle *tep)
 {
-       if(tep)
+       if (tep)
                return tep->nr_events;
        return 0;
 }
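
Paired with tep_get_events_count(), the new accessor gives a bounds-checked walk over the events array. A usage sketch, assuming an initialized handle:

    #include <stdio.h>
    #include <event-parse.h>

    static void list_events(struct tep_handle *tep)
    {
            int i, n = tep_get_events_count(tep);

            for (i = 0; i < n; i++) {
                    struct tep_event *ev = tep_get_event(tep, i);

                    if (ev)   /* NULL on a bad handle or bad index */
                            printf("%s/%s\n", ev->system, ev->name);
            }
    }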
@@ -43,19 +56,47 @@ int tep_get_events_count(struct tep_handle *tep)
  * @flag: flag, or combination of flags to be set
  * can be any combination from enum tep_flag
  *
- * This sets a flag or mbination of flags  from enum tep_flag
-*/
+ * This sets a flag or combination of flags from enum tep_flag
+ */
 void tep_set_flag(struct tep_handle *tep, int flag)
 {
-       if(tep)
+       if (tep)
                tep->flags |= flag;
 }
 
-unsigned short tep_data2host2(struct tep_handle *pevent, unsigned short data)
+/**
+ * tep_clear_flag - clear event parser flag
+ * @tep: a handle to the tep_handle
+ * @flag: flag to be cleared
+ *
+ * This clears a tep flag
+ */
+void tep_clear_flag(struct tep_handle *tep, enum tep_flag flag)
+{
+       if (tep)
+               tep->flags &= ~flag;
+}
+
+/**
+ * tep_test_flag - check the state of event parser flag
+ * @tep: a handle to the tep_handle
+ * @flag: flag to be checked
+ *
+ * This returns the state of the requested tep flag.
+ * Returns: true if the flag is set, false otherwise.
+ */
+bool tep_test_flag(struct tep_handle *tep, enum tep_flag flag)
+{
+       if (tep)
+               return tep->flags & flag;
+       return false;
+}
+
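
With set, clear, and test all exported, flag handling becomes symmetric. For example, toggling nanosecond timestamp output (TEP_NSEC_OUTPUT is an existing enum tep_flag value; the handle is assumed initialized):

    tep_set_flag(tep, TEP_NSEC_OUTPUT);
    if (tep_test_flag(tep, TEP_NSEC_OUTPUT))
            printf("timestamps will be printed in nanoseconds\n");
    tep_clear_flag(tep, TEP_NSEC_OUTPUT);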
+unsigned short tep_data2host2(struct tep_handle *tep, unsigned short data)
 {
        unsigned short swap;
 
-       if (!pevent || pevent->host_bigendian == pevent->file_bigendian)
+       if (!tep || tep->host_bigendian == tep->file_bigendian)
                return data;
 
        swap = ((data & 0xffULL) << 8) |
@@ -64,11 +105,11 @@ unsigned short tep_data2host2(struct tep_handle *pevent, unsigned short data)
        return swap;
 }
 
-unsigned int tep_data2host4(struct tep_handle *pevent, unsigned int data)
+unsigned int tep_data2host4(struct tep_handle *tep, unsigned int data)
 {
        unsigned int swap;
 
-       if (!pevent || pevent->host_bigendian == pevent->file_bigendian)
+       if (!tep || tep->host_bigendian == tep->file_bigendian)
                return data;
 
        swap = ((data & 0xffULL) << 24) |
@@ -80,11 +121,11 @@ unsigned int tep_data2host4(struct tep_handle *pevent, unsigned int data)
 }
 
 unsigned long long
-tep_data2host8(struct tep_handle *pevent, unsigned long long data)
+tep_data2host8(struct tep_handle *tep, unsigned long long data)
 {
        unsigned long long swap;
 
-       if (!pevent || pevent->host_bigendian == pevent->file_bigendian)
+       if (!tep || tep->host_bigendian == tep->file_bigendian)
                return data;
 
        swap = ((data & 0xffULL) << 56) |
@@ -101,175 +142,232 @@ tep_data2host8(struct tep_handle *pevent, unsigned long long data)
 
 /**
  * tep_get_header_page_size - get size of the header page
- * @pevent: a handle to the tep_handle
+ * @tep: a handle to the tep_handle
  *
  * This returns size of the header page
- * If @pevent is NULL, 0 is returned.
+ * If @tep is NULL, 0 is returned.
+ */
+int tep_get_header_page_size(struct tep_handle *tep)
+{
+       if (tep)
+               return tep->header_page_size_size;
+       return 0;
+}
+
+/**
+ * tep_get_header_timestamp_size - get size of the timestamp in the header page
+ * @tep: a handle to the tep_handle
+ *
+ * This returns size of the timestamp in the header page
+ * If @tep is NULL, 0 is returned.
  */
-int tep_get_header_page_size(struct tep_handle *pevent)
+int tep_get_header_timestamp_size(struct tep_handle *tep)
 {
-       if(pevent)
-               return pevent->header_page_size_size;
+       if (tep)
+               return tep->header_page_ts_size;
        return 0;
 }
 
 /**
  * tep_get_cpus - get the number of CPUs
- * @pevent: a handle to the tep_handle
+ * @tep: a handle to the tep_handle
  *
  * This returns the number of CPUs
- * If @pevent is NULL, 0 is returned.
+ * If @tep is NULL, 0 is returned.
  */
-int tep_get_cpus(struct tep_handle *pevent)
+int tep_get_cpus(struct tep_handle *tep)
 {
-       if(pevent)
-               return pevent->cpus;
+       if (tep)
+               return tep->cpus;
        return 0;
 }
 
 /**
  * tep_set_cpus - set the number of CPUs
- * @pevent: a handle to the tep_handle
+ * @tep: a handle to the tep_handle
  *
  * This sets the number of CPUs
  */
-void tep_set_cpus(struct tep_handle *pevent, int cpus)
+void tep_set_cpus(struct tep_handle *tep, int cpus)
 {
-       if(pevent)
-               pevent->cpus = cpus;
+       if (tep)
+               tep->cpus = cpus;
 }
 
 /**
- * tep_get_long_size - get the size of a long integer on the current machine
- * @pevent: a handle to the tep_handle
+ * tep_get_long_size - get the size of a long integer on the traced machine
+ * @tep: a handle to the tep_handle
  *
- * This returns the size of a long integer on the current machine
- * If @pevent is NULL, 0 is returned.
+ * This returns the size of a long integer on the traced machine
+ * If @tep is NULL, 0 is returned.
  */
-int tep_get_long_size(struct tep_handle *pevent)
+int tep_get_long_size(struct tep_handle *tep)
 {
-       if(pevent)
-               return pevent->long_size;
+       if (tep)
+               return tep->long_size;
        return 0;
 }
 
 /**
- * tep_set_long_size - set the size of a long integer on the current machine
- * @pevent: a handle to the tep_handle
+ * tep_set_long_size - set the size of a long integer on the traced machine
+ * @tep: a handle to the tep_handle
  * @size: size, in bytes, of a long integer
  *
- * This sets the size of a long integer on the current machine
+ * This sets the size of a long integer on the traced machine
  */
-void tep_set_long_size(struct tep_handle *pevent, int long_size)
+void tep_set_long_size(struct tep_handle *tep, int long_size)
 {
-       if(pevent)
-               pevent->long_size = long_size;
+       if (tep)
+               tep->long_size = long_size;
 }
 
 /**
- * tep_get_page_size - get the size of a memory page on the current machine
- * @pevent: a handle to the tep_handle
+ * tep_get_page_size - get the size of a memory page on the traced machine
+ * @tep: a handle to the tep_handle
  *
- * This returns the size of a memory page on the current machine
- * If @pevent is NULL, 0 is returned.
+ * This returns the size of a memory page on the traced machine
+ * If @tep is NULL, 0 is returned.
  */
-int tep_get_page_size(struct tep_handle *pevent)
+int tep_get_page_size(struct tep_handle *tep)
 {
-       if(pevent)
-               return pevent->page_size;
+       if (tep)
+               return tep->page_size;
        return 0;
 }
 
 /**
- * tep_set_page_size - set the size of a memory page on the current machine
- * @pevent: a handle to the tep_handle
+ * tep_set_page_size - set the size of a memory page on the traced machine
+ * @tep: a handle to the tep_handle
  * @_page_size: size of a memory page, in bytes
  *
- * This sets the size of a memory page on the current machine
+ * This sets the size of a memory page on the traced machine
  */
-void tep_set_page_size(struct tep_handle *pevent, int _page_size)
+void tep_set_page_size(struct tep_handle *tep, int _page_size)
 {
-       if(pevent)
-               pevent->page_size = _page_size;
+       if (tep)
+               tep->page_size = _page_size;
 }
 
 /**
- * tep_file_bigendian - get if the file is in big endian order
- * @pevent: a handle to the tep_handle
+ * tep_is_file_bigendian - return whether the file is in big endian order
+ * @tep: a handle to the tep_handle
  *
- * This returns if the file is in big endian order
- * If @pevent is NULL, 0 is returned.
+ * This returns true if the file is in big endian order
+ * If @tep is NULL, false is returned.
  */
-int tep_file_bigendian(struct tep_handle *pevent)
+bool tep_is_file_bigendian(struct tep_handle *tep)
 {
-       if(pevent)
-               return pevent->file_bigendian;
-       return 0;
+       if (tep)
+               return (tep->file_bigendian == TEP_BIG_ENDIAN);
+       return false;
 }
 
 /**
  * tep_set_file_bigendian - set if the file is in big endian order
- * @pevent: a handle to the tep_handle
+ * @tep: a handle to the tep_handle
  * @endian: non zero if the file is in big endian order
  *
  * This sets if the file is in big endian order
  */
-void tep_set_file_bigendian(struct tep_handle *pevent, enum tep_endian endian)
+void tep_set_file_bigendian(struct tep_handle *tep, enum tep_endian endian)
 {
-       if(pevent)
-               pevent->file_bigendian = endian;
+       if (tep)
+               tep->file_bigendian = endian;
 }
 
 /**
- * tep_is_host_bigendian - get if the order of the current host is big endian
- * @pevent: a handle to the tep_handle
+ * tep_is_local_bigendian - return whether the saved local machine is big endian
+ * @tep: a handle to the tep_handle
  *
- * This gets if the order of the current host is big endian
- * If @pevent is NULL, 0 is returned.
+ * This returns true if the saved local machine in @tep is big endian.
+ * If @tep is NULL, false is returned.
  */
-int tep_is_host_bigendian(struct tep_handle *pevent)
+bool tep_is_local_bigendian(struct tep_handle *tep)
 {
-       if(pevent)
-               return pevent->host_bigendian;
+       if (tep)
+               return (tep->host_bigendian == TEP_BIG_ENDIAN);
        return false;
 }
 
 /**
- * tep_set_host_bigendian - set the order of the local host
- * @pevent: a handle to the tep_handle
+ * tep_set_local_bigendian - set the stored local machine endian order
+ * @tep: a handle to the tep_handle
  * @endian: non zero if the local host has big endian order
  *
- * This sets the order of the local host
+ * This sets the endian order for the local machine.
  */
-void tep_set_host_bigendian(struct tep_handle *pevent, enum tep_endian endian)
+void tep_set_local_bigendian(struct tep_handle *tep, enum tep_endian endian)
 {
-       if(pevent)
-               pevent->host_bigendian = endian;
+       if (tep)
+               tep->host_bigendian = endian;
 }
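
For illustration, a minimal sketch of how the endian setters interact
with byte swapping: tep_read_number() (public counterpart of the
tep_data2host*() helpers) swaps only when the file and local settings
disagree. The header path is an assumption:

    #include <stdio.h>
    #include <traceevent/event-parse.h>

    int main(void)
    {
            struct tep_handle *tep = tep_alloc();
            unsigned int raw = 0x12345678;

            if (!tep)
                    return 1;

            /* pretend the trace data came from a big endian machine */
            tep_set_file_bigendian(tep, TEP_BIG_ENDIAN);
            /* ... and is being read as if on a little endian host */
            tep_set_local_bigendian(tep, TEP_LITTLE_ENDIAN);

            /* the two settings differ, so the 4-byte value is swapped */
            printf("%#x -> %#llx\n", raw, tep_read_number(tep, &raw, 4));

            tep_free(tep);
            return 0;
    }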
 
 /**
  * tep_is_latency_format - get if the latency output format is configured
- * @pevent: a handle to the tep_handle
+ * @tep: a handle to the tep_handle
  *
- * This gets if the latency output format is configured
- * If @pevent is NULL, 0 is returned.
+ * This returns true if the latency output format is configured
+ * If @tep is NULL, false is returned.
  */
-int tep_is_latency_format(struct tep_handle *pevent)
+bool tep_is_latency_format(struct tep_handle *tep)
 {
-       if(pevent)
-               return pevent->latency_format;
-       return 0;
+       if (tep)
+               return (tep->latency_format);
+       return false;
 }
 
 /**
  * tep_set_latency_format - set the latency output format
- * @pevent: a handle to the tep_handle
+ * @tep: a handle to the tep_handle
  * @lat: non zero for latency output format
  *
  * This sets the latency output format
  */
-void tep_set_latency_format(struct tep_handle *pevent, int lat)
+void tep_set_latency_format(struct tep_handle *tep, int lat)
+{
+       if (tep)
+               tep->latency_format = lat;
+}
+
+/**
+ * tep_is_old_format - get if an old kernel is used
+ * @tep: a handle to the tep_handle
+ *
+ * This returns true if an old kernel is used to generate the tracing events, or
+ * false if a new kernel is used. Old kernels did not have header page info.
+ * If @tep is NULL, false is returned.
+ */
+bool tep_is_old_format(struct tep_handle *tep)
+{
+       if (tep)
+               return tep->old_format;
+       return false;
+}
+
+/**
+ * tep_set_print_raw - set a flag to force print in raw format
+ * @tep: a handle to the tep_handle
+ * @print_raw: the new value of the print_raw flag
+ *
+ * This sets a flag to force print in raw format
+ */
+void tep_set_print_raw(struct tep_handle *tep, int print_raw)
+{
+       if (tep)
+               tep->print_raw = print_raw;
+}
+
+/**
+ * tep_set_test_filters - set a flag to test a filter string
+ * @tep: a handle to the tep_handle
+ * @test_filters: the new value of the test_filters flag
+ *
+ * This sets a flag to test a filter string. If this flag is set, when
+ * the tep_filter_add_filter_str() API is called, it will print the filter string
+ * instead of adding it.
+ */
+void tep_set_test_filters(struct tep_handle *tep, int test_filters)
 {
-       if(pevent)
-               pevent->latency_format = lat;
+       if (tep)
+               tep->test_filters = test_filters;
 }
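
A sketch of how the test_filters flag might be used for a dry run.
check_filter() is a hypothetical helper; tep_filter_alloc(),
tep_filter_add_filter_str() and tep_filter_free() are existing library
calls, and the header path may vary:

    #include <traceevent/event-parse.h>

    /* hypothetical: print a filter string instead of installing it */
    static void check_filter(struct tep_handle *tep, const char *str)
    {
            struct tep_event_filter *filter = tep_filter_alloc(tep);

            if (!filter)
                    return;

            /* per the doc above: the string is printed, not added */
            tep_set_test_filters(tep, 1);
            tep_filter_add_filter_str(filter, str);
            tep_set_test_filters(tep, 0);

            tep_filter_free(filter);
    }
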
index 35833ee32d6c32b7b92202f006f6d8880b47bc76..09aa142f7fdd82f0d1b561bf4c6095b0c14b1f1a 100644 (file)
@@ -92,8 +92,8 @@ struct tep_handle {
 void tep_free_event(struct tep_event *event);
 void tep_free_format_field(struct tep_format_field *field);
 
-unsigned short tep_data2host2(struct tep_handle *pevent, unsigned short data);
-unsigned int tep_data2host4(struct tep_handle *pevent, unsigned int data);
-unsigned long long tep_data2host8(struct tep_handle *pevent, unsigned long long data);
+unsigned short tep_data2host2(struct tep_handle *tep, unsigned short data);
+unsigned int tep_data2host4(struct tep_handle *tep, unsigned int data);
+unsigned long long tep_data2host8(struct tep_handle *tep, unsigned long long data);
 
 #endif /* _PARSE_EVENTS_INT_H */
index 87494c7c619d85dd4199a31b7de4a2739ad678b8..b36b536a9fcbaa9ce8ee336a6c1df355fa533295 100644 (file)
@@ -148,14 +148,14 @@ struct cmdline_list {
        int                     pid;
 };
 
-static int cmdline_init(struct tep_handle *pevent)
+static int cmdline_init(struct tep_handle *tep)
 {
-       struct cmdline_list *cmdlist = pevent->cmdlist;
+       struct cmdline_list *cmdlist = tep->cmdlist;
        struct cmdline_list *item;
        struct tep_cmdline *cmdlines;
        int i;
 
-       cmdlines = malloc(sizeof(*cmdlines) * pevent->cmdline_count);
+       cmdlines = malloc(sizeof(*cmdlines) * tep->cmdline_count);
        if (!cmdlines)
                return -1;
 
@@ -169,15 +169,15 @@ static int cmdline_init(struct tep_handle *pevent)
                free(item);
        }
 
-       qsort(cmdlines, pevent->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
+       qsort(cmdlines, tep->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
 
-       pevent->cmdlines = cmdlines;
-       pevent->cmdlist = NULL;
+       tep->cmdlines = cmdlines;
+       tep->cmdlist = NULL;
 
        return 0;
 }
 
-static const char *find_cmdline(struct tep_handle *pevent, int pid)
+static const char *find_cmdline(struct tep_handle *tep, int pid)
 {
        const struct tep_cmdline *comm;
        struct tep_cmdline key;
@@ -185,13 +185,13 @@ static const char *find_cmdline(struct tep_handle *pevent, int pid)
        if (!pid)
                return "<idle>";
 
-       if (!pevent->cmdlines && cmdline_init(pevent))
+       if (!tep->cmdlines && cmdline_init(tep))
                return "<not enough memory for cmdlines!>";
 
        key.pid = pid;
 
-       comm = bsearch(&key, pevent->cmdlines, pevent->cmdline_count,
-                      sizeof(*pevent->cmdlines), cmdline_cmp);
+       comm = bsearch(&key, tep->cmdlines, tep->cmdline_count,
+                      sizeof(*tep->cmdlines), cmdline_cmp);
 
        if (comm)
                return comm->comm;
@@ -199,32 +199,32 @@ static const char *find_cmdline(struct tep_handle *pevent, int pid)
 }
 
 /**
- * tep_pid_is_registered - return if a pid has a cmdline registered
- * @pevent: handle for the pevent
+ * tep_is_pid_registered - return if a pid has a cmdline registered
+ * @tep: a handle to the trace event parser context
  * @pid: The pid to check for a registered cmdline.
  *
- * Returns 1 if the pid has a cmdline mapped to it
- * 0 otherwise.
+ * Returns true if the pid has a cmdline mapped to it
+ * false otherwise.
  */
-int tep_pid_is_registered(struct tep_handle *pevent, int pid)
+bool tep_is_pid_registered(struct tep_handle *tep, int pid)
 {
        const struct tep_cmdline *comm;
        struct tep_cmdline key;
 
        if (!pid)
-               return 1;
+               return true;
 
-       if (!pevent->cmdlines && cmdline_init(pevent))
-               return 0;
+       if (!tep->cmdlines && cmdline_init(tep))
+               return false;
 
        key.pid = pid;
 
-       comm = bsearch(&key, pevent->cmdlines, pevent->cmdline_count,
-                      sizeof(*pevent->cmdlines), cmdline_cmp);
+       comm = bsearch(&key, tep->cmdlines, tep->cmdline_count,
+                      sizeof(*tep->cmdlines), cmdline_cmp);
 
        if (comm)
-               return 1;
-       return 0;
+               return true;
+       return false;
 }
 
 /*
@@ -232,10 +232,10 @@ int tep_pid_is_registered(struct tep_handle *pevent, int pid)
  * we must add this pid. This is much slower than when cmdlines
  * are added before the array is initialized.
  */
-static int add_new_comm(struct tep_handle *pevent,
+static int add_new_comm(struct tep_handle *tep,
                        const char *comm, int pid, bool override)
 {
-       struct tep_cmdline *cmdlines = pevent->cmdlines;
+       struct tep_cmdline *cmdlines = tep->cmdlines;
        struct tep_cmdline *cmdline;
        struct tep_cmdline key;
        char *new_comm;
@@ -246,8 +246,8 @@ static int add_new_comm(struct tep_handle *pevent,
        /* avoid duplicates */
        key.pid = pid;
 
-       cmdline = bsearch(&key, pevent->cmdlines, pevent->cmdline_count,
-                      sizeof(*pevent->cmdlines), cmdline_cmp);
+       cmdline = bsearch(&key, tep->cmdlines, tep->cmdline_count,
+                         sizeof(*tep->cmdlines), cmdline_cmp);
        if (cmdline) {
                if (!override) {
                        errno = EEXIST;
@@ -264,37 +264,37 @@ static int add_new_comm(struct tep_handle *pevent,
                return 0;
        }
 
-       cmdlines = realloc(cmdlines, sizeof(*cmdlines) * (pevent->cmdline_count + 1));
+       cmdlines = realloc(cmdlines, sizeof(*cmdlines) * (tep->cmdline_count + 1));
        if (!cmdlines) {
                errno = ENOMEM;
                return -1;
        }
 
-       cmdlines[pevent->cmdline_count].comm = strdup(comm);
-       if (!cmdlines[pevent->cmdline_count].comm) {
+       cmdlines[tep->cmdline_count].comm = strdup(comm);
+       if (!cmdlines[tep->cmdline_count].comm) {
                free(cmdlines);
                errno = ENOMEM;
                return -1;
        }
 
-       cmdlines[pevent->cmdline_count].pid = pid;
+       cmdlines[tep->cmdline_count].pid = pid;
                
-       if (cmdlines[pevent->cmdline_count].comm)
-               pevent->cmdline_count++;
+       if (cmdlines[tep->cmdline_count].comm)
+               tep->cmdline_count++;
 
-       qsort(cmdlines, pevent->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
-       pevent->cmdlines = cmdlines;
+       qsort(cmdlines, tep->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
+       tep->cmdlines = cmdlines;
 
        return 0;
 }
 
-static int _tep_register_comm(struct tep_handle *pevent,
+static int _tep_register_comm(struct tep_handle *tep,
                              const char *comm, int pid, bool override)
 {
        struct cmdline_list *item;
 
-       if (pevent->cmdlines)
-               return add_new_comm(pevent, comm, pid, override);
+       if (tep->cmdlines)
+               return add_new_comm(tep, comm, pid, override);
 
        item = malloc(sizeof(*item));
        if (!item)
@@ -309,17 +309,17 @@ static int _tep_register_comm(struct tep_handle *pevent,
                return -1;
        }
        item->pid = pid;
-       item->next = pevent->cmdlist;
+       item->next = tep->cmdlist;
 
-       pevent->cmdlist = item;
-       pevent->cmdline_count++;
+       tep->cmdlist = item;
+       tep->cmdline_count++;
 
        return 0;
 }
 
 /**
  * tep_register_comm - register a pid / comm mapping
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
  * @comm: the command line to register
  * @pid: the pid to map the command line to
  *
@@ -327,14 +327,14 @@ static int _tep_register_comm(struct tep_handle *pevent,
  * a given pid. The comm is duplicated. If a command with the same pid
  * already exists, -1 is returned and errno is set to EEXIST
  */
-int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid)
+int tep_register_comm(struct tep_handle *tep, const char *comm, int pid)
 {
-       return _tep_register_comm(pevent, comm, pid, false);
+       return _tep_register_comm(tep, comm, pid, false);
 }
 
 /**
  * tep_override_comm - register a pid / comm mapping
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
  * @comm: the command line to register
  * @pid: the pid to map the command line to
  *
@@ -342,19 +342,19 @@ int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid)
  * a given pid. The comm is duplicated. If a command with the same pid
  * already exists, the command string is updated with the new one
  */
-int tep_override_comm(struct tep_handle *pevent, const char *comm, int pid)
+int tep_override_comm(struct tep_handle *tep, const char *comm, int pid)
 {
-       if (!pevent->cmdlines && cmdline_init(pevent)) {
+       if (!tep->cmdlines && cmdline_init(tep)) {
                errno = ENOMEM;
                return -1;
        }
-       return _tep_register_comm(pevent, comm, pid, true);
+       return _tep_register_comm(tep, comm, pid, true);
 }
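
For illustration, a short sketch of the comm registration calls above.
map_pids() is a hypothetical helper and the pid/comm values are made
up; the header path may vary:

    #include <stdio.h>
    #include <traceevent/event-parse.h>

    static void map_pids(struct tep_handle *tep)
    {
            /* first registration for pid 42 succeeds */
            tep_register_comm(tep, "bash", 42);

            /*
             * A second tep_register_comm() for pid 42 fails with
             * EEXIST; tep_override_comm() replaces the string instead.
             */
            tep_override_comm(tep, "sh", 42);

            if (tep_is_pid_registered(tep, 42))
                    printf("pid 42 has a comm registered\n");
    }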
 
-int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock)
+int tep_register_trace_clock(struct tep_handle *tep, const char *trace_clock)
 {
-       pevent->trace_clock = strdup(trace_clock);
-       if (!pevent->trace_clock) {
+       tep->trace_clock = strdup(trace_clock);
+       if (!tep->trace_clock) {
                errno = ENOMEM;
                return -1;
        }
@@ -408,18 +408,18 @@ static int func_bcmp(const void *a, const void *b)
        return 1;
 }
 
-static int func_map_init(struct tep_handle *pevent)
+static int func_map_init(struct tep_handle *tep)
 {
        struct func_list *funclist;
        struct func_list *item;
        struct func_map *func_map;
        int i;
 
-       func_map = malloc(sizeof(*func_map) * (pevent->func_count + 1));
+       func_map = malloc(sizeof(*func_map) * (tep->func_count + 1));
        if (!func_map)
                return -1;
 
-       funclist = pevent->funclist;
+       funclist = tep->funclist;
 
        i = 0;
        while (funclist) {
@@ -432,34 +432,34 @@ static int func_map_init(struct tep_handle *pevent)
                free(item);
        }
 
-       qsort(func_map, pevent->func_count, sizeof(*func_map), func_cmp);
+       qsort(func_map, tep->func_count, sizeof(*func_map), func_cmp);
 
        /*
         * Add a special record at the end.
         */
-       func_map[pevent->func_count].func = NULL;
-       func_map[pevent->func_count].addr = 0;
-       func_map[pevent->func_count].mod = NULL;
+       func_map[tep->func_count].func = NULL;
+       func_map[tep->func_count].addr = 0;
+       func_map[tep->func_count].mod = NULL;
 
-       pevent->func_map = func_map;
-       pevent->funclist = NULL;
+       tep->func_map = func_map;
+       tep->funclist = NULL;
 
        return 0;
 }
 
 static struct func_map *
-__find_func(struct tep_handle *pevent, unsigned long long addr)
+__find_func(struct tep_handle *tep, unsigned long long addr)
 {
        struct func_map *func;
        struct func_map key;
 
-       if (!pevent->func_map)
-               func_map_init(pevent);
+       if (!tep->func_map)
+               func_map_init(tep);
 
        key.addr = addr;
 
-       func = bsearch(&key, pevent->func_map, pevent->func_count,
-                      sizeof(*pevent->func_map), func_bcmp);
+       func = bsearch(&key, tep->func_map, tep->func_count,
+                      sizeof(*tep->func_map), func_bcmp);
 
        return func;
 }
@@ -472,15 +472,14 @@ struct func_resolver {
 
 /**
  * tep_set_function_resolver - set an alternative function resolver
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
  * @resolver: function to be used
  * @priv: resolver function private state.
  *
  * Some tools may have already a way to resolve kernel functions, allow them to
- * keep using it instead of duplicating all the entries inside
- * pevent->funclist.
+ * keep using it instead of duplicating all the entries inside tep->funclist.
  */
-int tep_set_function_resolver(struct tep_handle *pevent,
+int tep_set_function_resolver(struct tep_handle *tep,
                              tep_func_resolver_t *func, void *priv)
 {
        struct func_resolver *resolver = malloc(sizeof(*resolver));
@@ -491,38 +490,38 @@ int tep_set_function_resolver(struct tep_handle *pevent,
        resolver->func = func;
        resolver->priv = priv;
 
-       free(pevent->func_resolver);
-       pevent->func_resolver = resolver;
+       free(tep->func_resolver);
+       tep->func_resolver = resolver;
 
        return 0;
 }
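
A sketch of plugging in such a resolver. my_resolver() and its lookup
logic are hypothetical; the callback shape follows the library's
tep_func_resolver_t typedef:

    #include <traceevent/event-parse.h>

    /* hypothetical resolver backed by a tool's own symbol table */
    static char *my_resolver(void *priv, unsigned long long *addrp,
                             char **modp)
    {
            (void)priv;             /* this sketch keeps no state */
            *addrp &= ~0xfffULL;    /* fake "round down to symbol start" */
            *modp = NULL;           /* not in a module */
            return "my_symbol";     /* NULL would mean "not found" */
    }

    static void install_resolver(struct tep_handle *tep)
    {
            tep_set_function_resolver(tep, my_resolver, NULL);
            /* ... resolve addresses ... */
            tep_reset_function_resolver(tep);  /* back to the default */
    }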
 
 /**
  * tep_reset_function_resolver - reset alternative function resolver
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
  *
  * Stop using whatever alternative resolver was set, use the default
  * one instead.
  */
-void tep_reset_function_resolver(struct tep_handle *pevent)
+void tep_reset_function_resolver(struct tep_handle *tep)
 {
-       free(pevent->func_resolver);
-       pevent->func_resolver = NULL;
+       free(tep->func_resolver);
+       tep->func_resolver = NULL;
 }
 
 static struct func_map *
-find_func(struct tep_handle *pevent, unsigned long long addr)
+find_func(struct tep_handle *tep, unsigned long long addr)
 {
        struct func_map *map;
 
-       if (!pevent->func_resolver)
-               return __find_func(pevent, addr);
+       if (!tep->func_resolver)
+               return __find_func(tep, addr);
 
-       map = &pevent->func_resolver->map;
+       map = &tep->func_resolver->map;
        map->mod  = NULL;
        map->addr = addr;
-       map->func = pevent->func_resolver->func(pevent->func_resolver->priv,
-                                               &map->addr, &map->mod);
+       map->func = tep->func_resolver->func(tep->func_resolver->priv,
+                                            &map->addr, &map->mod);
        if (map->func == NULL)
                return NULL;
 
@@ -531,18 +530,18 @@ find_func(struct tep_handle *pevent, unsigned long long addr)
 
 /**
  * tep_find_function - find a function by a given address
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
  * @addr: the address to find the function with
  *
  * Returns a pointer to the function stored that has the given
  * address. Note, the address does not have to be exact, it
  * will select the function that would contain the address.
  */
-const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr)
+const char *tep_find_function(struct tep_handle *tep, unsigned long long addr)
 {
        struct func_map *map;
 
-       map = find_func(pevent, addr);
+       map = find_func(tep, addr);
        if (!map)
                return NULL;
 
@@ -551,7 +550,7 @@ const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr
 
 /**
  * tep_find_function_address - find a function address by a given address
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
  * @addr: the address to find the function with
  *
  * Returns the address the function starts at. This can be used in
@@ -559,11 +558,11 @@ const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr
  * name and the function offset.
  */
 unsigned long long
-tep_find_function_address(struct tep_handle *pevent, unsigned long long addr)
+tep_find_function_address(struct tep_handle *tep, unsigned long long addr)
 {
        struct func_map *map;
 
-       map = find_func(pevent, addr);
+       map = find_func(tep, addr);
        if (!map)
                return 0;
 
@@ -572,7 +571,7 @@ tep_find_function_address(struct tep_handle *pevent, unsigned long long addr)
 
 /**
  * tep_register_function - register a function with a given address
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
  * @function: the function name to register
  * @addr: the address the function starts at
  * @mod: the kernel module the function may be in (NULL for none)
@@ -580,7 +579,7 @@ tep_find_function_address(struct tep_handle *pevent, unsigned long long addr)
  * This registers a function name with an address and module.
  * The @func passed in is duplicated.
  */
-int tep_register_function(struct tep_handle *pevent, char *func,
+int tep_register_function(struct tep_handle *tep, char *func,
                          unsigned long long addr, char *mod)
 {
        struct func_list *item = malloc(sizeof(*item));
@@ -588,7 +587,7 @@ int tep_register_function(struct tep_handle *pevent, char *func,
        if (!item)
                return -1;
 
-       item->next = pevent->funclist;
+       item->next = tep->funclist;
        item->func = strdup(func);
        if (!item->func)
                goto out_free;
@@ -601,8 +600,8 @@ int tep_register_function(struct tep_handle *pevent, char *func,
                item->mod = NULL;
        item->addr = addr;
 
-       pevent->funclist = item;
-       pevent->func_count++;
+       tep->funclist = item;
+       tep->func_count++;
 
        return 0;
 
@@ -617,23 +616,23 @@ out_free:
 
 /**
  * tep_print_funcs - print out the stored functions
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
  *
  * This prints out the stored functions.
  */
-void tep_print_funcs(struct tep_handle *pevent)
+void tep_print_funcs(struct tep_handle *tep)
 {
        int i;
 
-       if (!pevent->func_map)
-               func_map_init(pevent);
+       if (!tep->func_map)
+               func_map_init(tep);
 
-       for (i = 0; i < (int)pevent->func_count; i++) {
+       for (i = 0; i < (int)tep->func_count; i++) {
                printf("%016llx %s",
-                      pevent->func_map[i].addr,
-                      pevent->func_map[i].func);
-               if (pevent->func_map[i].mod)
-                       printf(" [%s]\n", pevent->func_map[i].mod);
+                      tep->func_map[i].addr,
+                      tep->func_map[i].func);
+               if (tep->func_map[i].mod)
+                       printf(" [%s]\n", tep->func_map[i].mod);
                else
                        printf("\n");
        }
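
For illustration, feeding kallsyms-style entries into the handle and
resolving an inexact address. load_symbols() is a hypothetical helper
and the addresses are made up; the header path may vary:

    #include <stdio.h>
    #include <traceevent/event-parse.h>

    static void load_symbols(struct tep_handle *tep)
    {
            tep_register_function(tep, "schedule",
                                  0xffffffff81000000ULL, NULL);
            tep_register_function(tep, "e1000_xmit",
                                  0xffffffffa0000000ULL, "e1000");

            /* inexact addresses resolve to the containing function */
            printf("%s\n", tep_find_function(tep, 0xffffffff81000010ULL));

            tep_print_funcs(tep);   /* dump the sorted map */
    }
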
@@ -663,18 +662,18 @@ static int printk_cmp(const void *a, const void *b)
        return 0;
 }
 
-static int printk_map_init(struct tep_handle *pevent)
+static int printk_map_init(struct tep_handle *tep)
 {
        struct printk_list *printklist;
        struct printk_list *item;
        struct printk_map *printk_map;
        int i;
 
-       printk_map = malloc(sizeof(*printk_map) * (pevent->printk_count + 1));
+       printk_map = malloc(sizeof(*printk_map) * (tep->printk_count + 1));
        if (!printk_map)
                return -1;
 
-       printklist = pevent->printklist;
+       printklist = tep->printklist;
 
        i = 0;
        while (printklist) {
@@ -686,41 +685,41 @@ static int printk_map_init(struct tep_handle *pevent)
                free(item);
        }
 
-       qsort(printk_map, pevent->printk_count, sizeof(*printk_map), printk_cmp);
+       qsort(printk_map, tep->printk_count, sizeof(*printk_map), printk_cmp);
 
-       pevent->printk_map = printk_map;
-       pevent->printklist = NULL;
+       tep->printk_map = printk_map;
+       tep->printklist = NULL;
 
        return 0;
 }
 
 static struct printk_map *
-find_printk(struct tep_handle *pevent, unsigned long long addr)
+find_printk(struct tep_handle *tep, unsigned long long addr)
 {
        struct printk_map *printk;
        struct printk_map key;
 
-       if (!pevent->printk_map && printk_map_init(pevent))
+       if (!tep->printk_map && printk_map_init(tep))
                return NULL;
 
        key.addr = addr;
 
-       printk = bsearch(&key, pevent->printk_map, pevent->printk_count,
-                        sizeof(*pevent->printk_map), printk_cmp);
+       printk = bsearch(&key, tep->printk_map, tep->printk_count,
+                        sizeof(*tep->printk_map), printk_cmp);
 
        return printk;
 }
 
 /**
  * tep_register_print_string - register a string by its address
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
  * @fmt: the string format to register
  * @addr: the address the string was located at
  *
  * This registers a string by the address it was stored in the kernel.
  * The @fmt passed in is duplicated.
  */
-int tep_register_print_string(struct tep_handle *pevent, const char *fmt,
+int tep_register_print_string(struct tep_handle *tep, const char *fmt,
                              unsigned long long addr)
 {
        struct printk_list *item = malloc(sizeof(*item));
@@ -729,7 +728,7 @@ int tep_register_print_string(struct tep_handle *pevent, const char *fmt,
        if (!item)
                return -1;
 
-       item->next = pevent->printklist;
+       item->next = tep->printklist;
        item->addr = addr;
 
        /* Strip off quotes and '\n' from the end */
@@ -747,8 +746,8 @@ int tep_register_print_string(struct tep_handle *pevent, const char *fmt,
        if (strcmp(p, "\\n") == 0)
                *p = 0;
 
-       pevent->printklist = item;
-       pevent->printk_count++;
+       tep->printklist = item;
+       tep->printk_count++;
 
        return 0;
 
@@ -760,21 +759,21 @@ out_free:
 
 /**
  * tep_print_printk - print out the stored strings
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
  *
  * This prints the string formats that were stored.
  */
-void tep_print_printk(struct tep_handle *pevent)
+void tep_print_printk(struct tep_handle *tep)
 {
        int i;
 
-       if (!pevent->printk_map)
-               printk_map_init(pevent);
+       if (!tep->printk_map)
+               printk_map_init(tep);
 
-       for (i = 0; i < (int)pevent->printk_count; i++) {
+       for (i = 0; i < (int)tep->printk_count; i++) {
                printf("%016llx %s\n",
-                      pevent->printk_map[i].addr,
-                      pevent->printk_map[i].printk);
+                      tep->printk_map[i].addr,
+                      tep->printk_map[i].printk);
        }
 }
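
A sketch of registering a trace_printk format by its kernel address.
The address and format here are made up; real pairs come from the
kernel's printk_formats file, quoted as shown, and the quotes and
trailing "\n" are stripped on insert per the code above:

    #include <traceevent/event-parse.h>

    static void load_printk_formats(struct tep_handle *tep)
    {
            tep_register_print_string(tep, "\"napi poll on cpu %d\\n\"",
                                      0xffffffff81234567ULL);

            tep_print_printk(tep);  /* dump address -> format pairs */
    }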
 
@@ -783,29 +782,29 @@ static struct tep_event *alloc_event(void)
        return calloc(1, sizeof(struct tep_event));
 }
 
-static int add_event(struct tep_handle *pevent, struct tep_event *event)
+static int add_event(struct tep_handle *tep, struct tep_event *event)
 {
        int i;
-       struct tep_event **events = realloc(pevent->events, sizeof(event) *
-                                           (pevent->nr_events + 1));
+       struct tep_event **events = realloc(tep->events, sizeof(event) *
+                                           (tep->nr_events + 1));
        if (!events)
                return -1;
 
-       pevent->events = events;
+       tep->events = events;
 
-       for (i = 0; i < pevent->nr_events; i++) {
-               if (pevent->events[i]->id > event->id)
+       for (i = 0; i < tep->nr_events; i++) {
+               if (tep->events[i]->id > event->id)
                        break;
        }
-       if (i < pevent->nr_events)
-               memmove(&pevent->events[i + 1],
-                       &pevent->events[i],
-                       sizeof(event) * (pevent->nr_events - i));
+       if (i < tep->nr_events)
+               memmove(&tep->events[i + 1],
+                       &tep->events[i],
+                       sizeof(event) * (tep->nr_events - i));
 
-       pevent->events[i] = event;
-       pevent->nr_events++;
+       tep->events[i] = event;
+       tep->nr_events++;
 
-       event->pevent = pevent;
+       event->tep = tep;
 
        return 0;
 }
@@ -1184,7 +1183,7 @@ static enum tep_event_type read_token(char **tok)
 }
 
 /**
- * tep_read_token - access to utilities to use the pevent parser
+ * tep_read_token - access to utilities to use the tep parser
  * @tok: The token to return
  *
  * This will parse tokens from the string given by
@@ -1657,8 +1656,8 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field **
                        else if (field->flags & TEP_FIELD_IS_STRING)
                                field->elementsize = 1;
                        else if (field->flags & TEP_FIELD_IS_LONG)
-                               field->elementsize = event->pevent ?
-                                                    event->pevent->long_size :
+                               field->elementsize = event->tep ?
+                                                    event->tep->long_size :
                                                     sizeof(long);
                } else
                        field->elementsize = field->size;
@@ -2233,7 +2232,7 @@ eval_type_str(unsigned long long val, const char *type, int pointer)
                return val & 0xffffffff;
 
        if (strcmp(type, "u64") == 0 ||
-           strcmp(type, "s64"))
+           strcmp(type, "s64") == 0)
                return val;
 
        if (strcmp(type, "s8") == 0)
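
The hunk above fixes a real logic bug: strcmp() returns 0 on a match,
so the old strcmp(type, "s64") test (missing the == 0) evaluated true
for every type string except "s64". A standalone demonstration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *type = "u32";

            if (strcmp(type, "s64"))        /* true: strings differ */
                    printf("old check wrongly matched \"%s\"\n", type);
            if (strcmp(type, "s64") == 0)   /* correct equality test */
                    printf("never printed for \"%s\"\n", type);
            return 0;
    }
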
@@ -2942,14 +2941,14 @@ process_bitmask(struct tep_event *event __maybe_unused, struct tep_print_arg *ar
 }
 
 static struct tep_function_handler *
-find_func_handler(struct tep_handle *pevent, char *func_name)
+find_func_handler(struct tep_handle *tep, char *func_name)
 {
        struct tep_function_handler *func;
 
-       if (!pevent)
+       if (!tep)
                return NULL;
 
-       for (func = pevent->func_handlers; func; func = func->next) {
+       for (func = tep->func_handlers; func; func = func->next) {
                if (strcmp(func->name, func_name) == 0)
                        break;
        }
@@ -2957,12 +2956,12 @@ find_func_handler(struct tep_handle *pevent, char *func_name)
        return func;
 }
 
-static void remove_func_handler(struct tep_handle *pevent, char *func_name)
+static void remove_func_handler(struct tep_handle *tep, char *func_name)
 {
        struct tep_function_handler *func;
        struct tep_function_handler **next;
 
-       next = &pevent->func_handlers;
+       next = &tep->func_handlers;
        while ((func = *next)) {
                if (strcmp(func->name, func_name) == 0) {
                        *next = func->next;
@@ -3076,7 +3075,7 @@ process_function(struct tep_event *event, struct tep_print_arg *arg,
                return process_dynamic_array_len(event, arg, tok);
        }
 
-       func = find_func_handler(event->pevent, token);
+       func = find_func_handler(event->tep, token);
        if (func) {
                free_token(token);
                return process_func_handler(event, func, arg, tok);
@@ -3357,14 +3356,14 @@ tep_find_any_field(struct tep_event *event, const char *name)
 
 /**
  * tep_read_number - read a number from data
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
  * @ptr: the raw data
  * @size: the size of the data that holds the number
  *
  * Returns the number (converted to host) from the
  * raw data.
  */
-unsigned long long tep_read_number(struct tep_handle *pevent,
+unsigned long long tep_read_number(struct tep_handle *tep,
                                   const void *ptr, int size)
 {
        unsigned long long val;
@@ -3373,12 +3372,12 @@ unsigned long long tep_read_number(struct tep_handle *pevent,
        case 1:
                return *(unsigned char *)ptr;
        case 2:
-               return tep_data2host2(pevent, *(unsigned short *)ptr);
+               return tep_data2host2(tep, *(unsigned short *)ptr);
        case 4:
-               return tep_data2host4(pevent, *(unsigned int *)ptr);
+               return tep_data2host4(tep, *(unsigned int *)ptr);
        case 8:
                memcpy(&val, (ptr), sizeof(unsigned long long));
-               return tep_data2host8(pevent, val);
+               return tep_data2host8(tep, val);
        default:
                /* BUG! */
                return 0;
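
For illustration, pulling a common field out of one record's raw data.
record_pid() is a hypothetical helper; tep_find_common_field() and
tep_read_number_field() are existing library calls:

    #include <traceevent/event-parse.h>

    /* hypothetical: read "common_pid" from one record's raw data */
    static int record_pid(struct tep_event *event, void *data)
    {
            struct tep_format_field *field;
            unsigned long long pid;

            field = tep_find_common_field(event, "common_pid");
            if (!field || tep_read_number_field(field, data, &pid))
                    return -1;

            return (int)pid;
    }
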
@@ -3406,7 +3405,7 @@ int tep_read_number_field(struct tep_format_field *field, const void *data,
        case 2:
        case 4:
        case 8:
-               *value = tep_read_number(field->event->pevent,
+               *value = tep_read_number(field->event->tep,
                                         data + field->offset, field->size);
                return 0;
        default:
@@ -3414,7 +3413,7 @@ int tep_read_number_field(struct tep_format_field *field, const void *data,
        }
 }
 
-static int get_common_info(struct tep_handle *pevent,
+static int get_common_info(struct tep_handle *tep,
                           const char *type, int *offset, int *size)
 {
        struct tep_event *event;
@@ -3424,12 +3423,12 @@ static int get_common_info(struct tep_handle *pevent,
         * All events should have the same common elements.
         * Pick any event to find where the type is;
         */
-       if (!pevent->events) {
+       if (!tep->events) {
                do_warning("no event_list!");
                return -1;
        }
 
-       event = pevent->events[0];
+       event = tep->events[0];
        field = tep_find_common_field(event, type);
        if (!field)
                return -1;
@@ -3440,58 +3439,58 @@ static int get_common_info(struct tep_handle *pevent,
        return 0;
 }
 
-static int __parse_common(struct tep_handle *pevent, void *data,
+static int __parse_common(struct tep_handle *tep, void *data,
                          int *size, int *offset, const char *name)
 {
        int ret;
 
        if (!*size) {
-               ret = get_common_info(pevent, name, offset, size);
+               ret = get_common_info(tep, name, offset, size);
                if (ret < 0)
                        return ret;
        }
-       return tep_read_number(pevent, data + *offset, *size);
+       return tep_read_number(tep, data + *offset, *size);
 }
 
-static int trace_parse_common_type(struct tep_handle *pevent, void *data)
+static int trace_parse_common_type(struct tep_handle *tep, void *data)
 {
-       return __parse_common(pevent, data,
-                             &pevent->type_size, &pevent->type_offset,
+       return __parse_common(tep, data,
+                             &tep->type_size, &tep->type_offset,
                              "common_type");
 }
 
-static int parse_common_pid(struct tep_handle *pevent, void *data)
+static int parse_common_pid(struct tep_handle *tep, void *data)
 {
-       return __parse_common(pevent, data,
-                             &pevent->pid_size, &pevent->pid_offset,
+       return __parse_common(tep, data,
+                             &tep->pid_size, &tep->pid_offset,
                              "common_pid");
 }
 
-static int parse_common_pc(struct tep_handle *pevent, void *data)
+static int parse_common_pc(struct tep_handle *tep, void *data)
 {
-       return __parse_common(pevent, data,
-                             &pevent->pc_size, &pevent->pc_offset,
+       return __parse_common(tep, data,
+                             &tep->pc_size, &tep->pc_offset,
                              "common_preempt_count");
 }
 
-static int parse_common_flags(struct tep_handle *pevent, void *data)
+static int parse_common_flags(struct tep_handle *tep, void *data)
 {
-       return __parse_common(pevent, data,
-                             &pevent->flags_size, &pevent->flags_offset,
+       return __parse_common(tep, data,
+                             &tep->flags_size, &tep->flags_offset,
                              "common_flags");
 }
 
-static int parse_common_lock_depth(struct tep_handle *pevent, void *data)
+static int parse_common_lock_depth(struct tep_handle *tep, void *data)
 {
-       return __parse_common(pevent, data,
-                             &pevent->ld_size, &pevent->ld_offset,
+       return __parse_common(tep, data,
+                             &tep->ld_size, &tep->ld_offset,
                              "common_lock_depth");
 }
 
-static int parse_common_migrate_disable(struct tep_handle *pevent, void *data)
+static int parse_common_migrate_disable(struct tep_handle *tep, void *data)
 {
-       return __parse_common(pevent, data,
-                             &pevent->ld_size, &pevent->ld_offset,
+       return __parse_common(tep, data,
+                             &tep->ld_size, &tep->ld_offset,
                              "common_migrate_disable");
 }
 
@@ -3499,28 +3498,28 @@ static int events_id_cmp(const void *a, const void *b);
 
 /**
  * tep_find_event - find an event by given id
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
  * @id: the id of the event
  *
  * Returns an event that has a given @id.
  */
-struct tep_event *tep_find_event(struct tep_handle *pevent, int id)
+struct tep_event *tep_find_event(struct tep_handle *tep, int id)
 {
        struct tep_event **eventptr;
        struct tep_event key;
        struct tep_event *pkey = &key;
 
        /* Check cache first */
-       if (pevent->last_event && pevent->last_event->id == id)
-               return pevent->last_event;
+       if (tep->last_event && tep->last_event->id == id)
+               return tep->last_event;
 
        key.id = id;
 
-       eventptr = bsearch(&pkey, pevent->events, pevent->nr_events,
-                          sizeof(*pevent->events), events_id_cmp);
+       eventptr = bsearch(&pkey, tep->events, tep->nr_events,
+                          sizeof(*tep->events), events_id_cmp);
 
        if (eventptr) {
-               pevent->last_event = *eventptr;
+               tep->last_event = *eventptr;
                return *eventptr;
        }
 
@@ -3529,7 +3528,7 @@ struct tep_event *tep_find_event(struct tep_handle *pevent, int id)
 
 /**
  * tep_find_event_by_name - find an event by given name
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
  * @sys: the system name to search for
  * @name: the name of the event to search for
  *
@@ -3537,19 +3536,19 @@ struct tep_event *tep_find_event(struct tep_handle *pevent, int id)
  * @sys. If @sys is NULL the first event with @name is returned.
  */
 struct tep_event *
-tep_find_event_by_name(struct tep_handle *pevent,
+tep_find_event_by_name(struct tep_handle *tep,
                       const char *sys, const char *name)
 {
        struct tep_event *event = NULL;
        int i;
 
-       if (pevent->last_event &&
-           strcmp(pevent->last_event->name, name) == 0 &&
-           (!sys || strcmp(pevent->last_event->system, sys) == 0))
-               return pevent->last_event;
+       if (tep->last_event &&
+           strcmp(tep->last_event->name, name) == 0 &&
+           (!sys || strcmp(tep->last_event->system, sys) == 0))
+               return tep->last_event;
 
-       for (i = 0; i < pevent->nr_events; i++) {
-               event = pevent->events[i];
+       for (i = 0; i < tep->nr_events; i++) {
+               event = tep->events[i];
                if (strcmp(event->name, name) == 0) {
                        if (!sys)
                                break;
@@ -3557,17 +3556,17 @@ tep_find_event_by_name(struct tep_handle *pevent,
                                break;
                }
        }
-       if (i == pevent->nr_events)
+       if (i == tep->nr_events)
                event = NULL;
 
-       pevent->last_event = event;
+       tep->last_event = event;
        return event;
 }
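
A sketch of the lookup API above. find_next_pid_field() is a
hypothetical helper and the event/field names are illustrative;
tep_find_field() is an existing library call:

    #include <traceevent/event-parse.h>

    static struct tep_format_field *
    find_next_pid_field(struct tep_handle *tep)
    {
            struct tep_event *event;

            /* a NULL system would match the first "sched_switch" */
            event = tep_find_event_by_name(tep, "sched", "sched_switch");
            if (!event)
                    return NULL;

            return tep_find_field(event, "next_pid");
    }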
 
 static unsigned long long
 eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg *arg)
 {
-       struct tep_handle *pevent = event->pevent;
+       struct tep_handle *tep = event->tep;
        unsigned long long val = 0;
        unsigned long long left, right;
        struct tep_print_arg *typearg = NULL;
@@ -3589,7 +3588,7 @@ eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg
                        
                }
                /* must be a number */
-               val = tep_read_number(pevent, data + arg->field.field->offset,
+               val = tep_read_number(tep, data + arg->field.field->offset,
                                      arg->field.field->size);
                break;
        case TEP_PRINT_FLAGS:
@@ -3629,11 +3628,11 @@ eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg
                        }
 
                        /* Default to long size */
-                       field_size = pevent->long_size;
+                       field_size = tep->long_size;
 
                        switch (larg->type) {
                        case TEP_PRINT_DYNAMIC_ARRAY:
-                               offset = tep_read_number(pevent,
+                               offset = tep_read_number(tep,
                                                   data + larg->dynarray.field->offset,
                                                   larg->dynarray.field->size);
                                if (larg->dynarray.field->elementsize)
@@ -3662,7 +3661,7 @@ eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg
                        default:
                                goto default_op; /* oops, all bets off */
                        }
-                       val = tep_read_number(pevent,
+                       val = tep_read_number(tep,
                                              data + offset, field_size);
                        if (typearg)
                                val = eval_type(val, typearg, 1);
@@ -3763,7 +3762,7 @@ eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg
                }
                break;
        case TEP_PRINT_DYNAMIC_ARRAY_LEN:
-               offset = tep_read_number(pevent,
+               offset = tep_read_number(tep,
                                         data + arg->dynarray.field->offset,
                                         arg->dynarray.field->size);
                /*
@@ -3775,7 +3774,7 @@ eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg
                break;
        case TEP_PRINT_DYNAMIC_ARRAY:
                /* Without [], we pass the address to the dynamic data */
-               offset = tep_read_number(pevent,
+               offset = tep_read_number(tep,
                                         data + arg->dynarray.field->offset,
                                         arg->dynarray.field->size);
                /*
@@ -3850,7 +3849,7 @@ static void print_str_to_seq(struct trace_seq *s, const char *format,
                trace_seq_printf(s, format, str);
 }
 
-static void print_bitmask_to_seq(struct tep_handle *pevent,
+static void print_bitmask_to_seq(struct tep_handle *tep,
                                 struct trace_seq *s, const char *format,
                                 int len_arg, const void *data, int size)
 {
@@ -3882,7 +3881,7 @@ static void print_bitmask_to_seq(struct tep_handle *pevent,
                 * In the kernel, this is an array of long words, thus
                 * endianness is very important.
                 */
-               if (pevent->file_bigendian)
+               if (tep->file_bigendian)
                        index = size - (len + 1);
                else
                        index = len;
@@ -3908,7 +3907,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
                          struct tep_event *event, const char *format,
                          int len_arg, struct tep_print_arg *arg)
 {
-       struct tep_handle *pevent = event->pevent;
+       struct tep_handle *tep = event->tep;
        struct tep_print_flag_sym *flag;
        struct tep_format_field *field;
        struct printk_map *printk;
@@ -3945,7 +3944,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
                 * is a pointer.
                 */
                if (!(field->flags & TEP_FIELD_IS_ARRAY) &&
-                   field->size == pevent->long_size) {
+                   field->size == tep->long_size) {
 
                        /* Handle heterogeneous recording and processing
                         * architectures
@@ -3960,12 +3959,12 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
                         * on 32-bit devices:
                         * In this case, 64 bits must be read.
                         */
-                       addr = (pevent->long_size == 8) ?
+                       addr = (tep->long_size == 8) ?
                                *(unsigned long long *)(data + field->offset) :
                                (unsigned long long)*(unsigned int *)(data + field->offset);
 
                        /* Check if it matches a print format */
-                       printk = find_printk(pevent, addr);
+                       printk = find_printk(tep, addr);
                        if (printk)
                                trace_seq_puts(s, printk->printk);
                        else
@@ -4022,7 +4021,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
        case TEP_PRINT_HEX_STR:
                if (arg->hex.field->type == TEP_PRINT_DYNAMIC_ARRAY) {
                        unsigned long offset;
-                       offset = tep_read_number(pevent,
+                       offset = tep_read_number(tep,
                                data + arg->hex.field->dynarray.field->offset,
                                arg->hex.field->dynarray.field->size);
                        hex = data + (offset & 0xffff);
@@ -4053,7 +4052,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
                        unsigned long offset;
                        struct tep_format_field *field =
                                arg->int_array.field->dynarray.field;
-                       offset = tep_read_number(pevent,
+                       offset = tep_read_number(tep,
                                                 data + field->offset,
                                                 field->size);
                        num = data + (offset & 0xffff);
@@ -4104,7 +4103,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
                        f = tep_find_any_field(event, arg->string.string);
                        arg->string.offset = f->offset;
                }
-               str_offset = tep_data2host4(pevent, *(unsigned int *)(data + arg->string.offset));
+               str_offset = tep_data2host4(tep, *(unsigned int *)(data + arg->string.offset));
                str_offset &= 0xffff;
                print_str_to_seq(s, format, len_arg, ((char *)data) + str_offset);
                break;
@@ -4122,10 +4121,10 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
                        f = tep_find_any_field(event, arg->bitmask.bitmask);
                        arg->bitmask.offset = f->offset;
                }
-               bitmask_offset = tep_data2host4(pevent, *(unsigned int *)(data + arg->bitmask.offset));
+               bitmask_offset = tep_data2host4(tep, *(unsigned int *)(data + arg->bitmask.offset));
                bitmask_size = bitmask_offset >> 16;
                bitmask_offset &= 0xffff;
-               print_bitmask_to_seq(pevent, s, format, len_arg,
+               print_bitmask_to_seq(tep, s, format, len_arg,
                                     data + bitmask_offset, bitmask_size);
                break;
        }
@@ -4257,7 +4256,7 @@ static void free_args(struct tep_print_arg *args)
 
 static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, struct tep_event *event)
 {
-       struct tep_handle *pevent = event->pevent;
+       struct tep_handle *tep = event->tep;
        struct tep_format_field *field, *ip_field;
        struct tep_print_arg *args, *arg, **next;
        unsigned long long ip, val;
@@ -4265,8 +4264,8 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
        void *bptr;
        int vsize = 0;
 
-       field = pevent->bprint_buf_field;
-       ip_field = pevent->bprint_ip_field;
+       field = tep->bprint_buf_field;
+       ip_field = tep->bprint_ip_field;
 
        if (!field) {
                field = tep_find_field(event, "buf");
@@ -4279,11 +4278,11 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
                        do_warning_event(event, "can't find ip field for binary printk");
                        return NULL;
                }
-               pevent->bprint_buf_field = field;
-               pevent->bprint_ip_field = ip_field;
+               tep->bprint_buf_field = field;
+               tep->bprint_ip_field = ip_field;
        }
 
-       ip = tep_read_number(pevent, data + ip_field->offset, ip_field->size);
+       ip = tep_read_number(tep, data + ip_field->offset, ip_field->size);
 
        /*
         * The first arg is the IP pointer.
@@ -4338,6 +4337,7 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
                                        case 'S':
                                        case 'f':
                                        case 'F':
+                                       case 'x':
                                                break;
                                        default:
                                                /*
@@ -4360,7 +4360,7 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
                                        vsize = 4;
                                        break;
                                case 1:
-                                       vsize = pevent->long_size;
+                                       vsize = tep->long_size;
                                        break;
                                case 2:
                                        vsize = 8;
@@ -4377,7 +4377,7 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
                                /* the pointers are always 4 bytes aligned */
                                bptr = (void *)(((unsigned long)bptr + 3) &
                                                ~3);
-                               val = tep_read_number(pevent, bptr, vsize);
+                               val = tep_read_number(tep, bptr, vsize);
                                bptr += vsize;
                                arg = alloc_arg();
                                if (!arg) {
@@ -4434,13 +4434,13 @@ static char *
 get_bprint_format(void *data, int size __maybe_unused,
                  struct tep_event *event)
 {
-       struct tep_handle *pevent = event->pevent;
+       struct tep_handle *tep = event->tep;
        unsigned long long addr;
        struct tep_format_field *field;
        struct printk_map *printk;
        char *format;
 
-       field = pevent->bprint_fmt_field;
+       field = tep->bprint_fmt_field;
 
        if (!field) {
                field = tep_find_field(event, "fmt");
@@ -4448,12 +4448,12 @@ get_bprint_format(void *data, int size __maybe_unused,
                        do_warning_event(event, "can't find format field for binary printk");
                        return NULL;
                }
-               pevent->bprint_fmt_field = field;
+               tep->bprint_fmt_field = field;
        }
 
-       addr = tep_read_number(pevent, data + field->offset, field->size);
+       addr = tep_read_number(tep, data + field->offset, field->size);
 
-       printk = find_printk(pevent, addr);
+       printk = find_printk(tep, addr);
        if (!printk) {
                if (asprintf(&format, "%%pf: (NO FORMAT FOUND at %llx)\n", addr) < 0)
                        return NULL;
@@ -4835,13 +4835,13 @@ void tep_print_field(struct trace_seq *s, void *data,
 {
        unsigned long long val;
        unsigned int offset, len, i;
-       struct tep_handle *pevent = field->event->pevent;
+       struct tep_handle *tep = field->event->tep;
 
        if (field->flags & TEP_FIELD_IS_ARRAY) {
                offset = field->offset;
                len = field->size;
                if (field->flags & TEP_FIELD_IS_DYNAMIC) {
-                       val = tep_read_number(pevent, data + offset, len);
+                       val = tep_read_number(tep, data + offset, len);
                        offset = val;
                        len = offset >> 16;
                        offset &= 0xffff;
@@ -4861,7 +4861,7 @@ void tep_print_field(struct trace_seq *s, void *data,
                        field->flags &= ~TEP_FIELD_IS_STRING;
                }
        } else {
-               val = tep_read_number(pevent, data + field->offset,
+               val = tep_read_number(tep, data + field->offset,
                                      field->size);
                if (field->flags & TEP_FIELD_IS_POINTER) {
                        trace_seq_printf(s, "0x%llx", val);
@@ -4910,7 +4910,7 @@ void tep_print_fields(struct trace_seq *s, void *data,
 
 static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_event *event)
 {
-       struct tep_handle *pevent = event->pevent;
+       struct tep_handle *tep = event->tep;
        struct tep_print_fmt *print_fmt = &event->print_fmt;
        struct tep_print_arg *arg = print_fmt->args;
        struct tep_print_arg *args = NULL;
@@ -5002,7 +5002,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e
                        case '-':
                                goto cont_process;
                        case 'p':
-                               if (pevent->long_size == 4)
+                               if (tep->long_size == 4)
                                        ls = 1;
                                else
                                        ls = 2;
@@ -5063,7 +5063,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e
                                arg = arg->next;
 
                                if (show_func) {
-                                       func = find_func(pevent, val);
+                                       func = find_func(tep, val);
                                        if (func) {
                                                trace_seq_puts(s, func->func);
                                                if (show_func == 'F')
@@ -5073,7 +5073,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e
                                                break;
                                        }
                                }
-                               if (pevent->long_size == 8 && ls == 1 &&
+                               if (tep->long_size == 8 && ls == 1 &&
                                    sizeof(long) != 8) {
                                        char *p;
 
@@ -5171,8 +5171,8 @@ out_failed:
 }
 
 /**
- * tep_data_lat_fmt - parse the data for the latency format
- * @pevent: a handle to the pevent
+ * tep_data_latency_format - parse the data for the latency format
+ * @tep: a handle to the trace event parser context
  * @s: the trace_seq to write to
  * @record: the record to read from
  *
@@ -5180,8 +5180,8 @@ out_failed:
  * need rescheduling, in hard/soft interrupt, preempt count
  * and lock depth) and places it into the trace_seq.
  */
-void tep_data_lat_fmt(struct tep_handle *pevent,
-                     struct trace_seq *s, struct tep_record *record)
+void tep_data_latency_format(struct tep_handle *tep,
+                            struct trace_seq *s, struct tep_record *record)
 {
        static int check_lock_depth = 1;
        static int check_migrate_disable = 1;
@@ -5195,13 +5195,13 @@ void tep_data_lat_fmt(struct tep_handle *pevent,
        int softirq;
        void *data = record->data;
 
-       lat_flags = parse_common_flags(pevent, data);
-       pc = parse_common_pc(pevent, data);
+       lat_flags = parse_common_flags(tep, data);
+       pc = parse_common_pc(tep, data);
        /* lock_depth may not always exist */
        if (lock_depth_exists)
-               lock_depth = parse_common_lock_depth(pevent, data);
+               lock_depth = parse_common_lock_depth(tep, data);
        else if (check_lock_depth) {
-               lock_depth = parse_common_lock_depth(pevent, data);
+               lock_depth = parse_common_lock_depth(tep, data);
                if (lock_depth < 0)
                        check_lock_depth = 0;
                else
@@ -5210,9 +5210,9 @@ void tep_data_lat_fmt(struct tep_handle *pevent,
 
        /* migrate_disable may not always exist */
        if (migrate_disable_exists)
-               migrate_disable = parse_common_migrate_disable(pevent, data);
+               migrate_disable = parse_common_migrate_disable(tep, data);
        else if (check_migrate_disable) {
-               migrate_disable = parse_common_migrate_disable(pevent, data);
+               migrate_disable = parse_common_migrate_disable(tep, data);
                if (migrate_disable < 0)
                        check_migrate_disable = 0;
                else
@@ -5255,79 +5255,79 @@ void tep_data_lat_fmt(struct tep_handle *pevent,
 
 /**
  * tep_data_type - parse out the given event type
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
  * @rec: the record to read from
  *
  * This returns the event id from the @rec.
  */
-int tep_data_type(struct tep_handle *pevent, struct tep_record *rec)
+int tep_data_type(struct tep_handle *tep, struct tep_record *rec)
 {
-       return trace_parse_common_type(pevent, rec->data);
+       return trace_parse_common_type(tep, rec->data);
 }
 
 /**
  * tep_data_pid - parse the PID from record
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
  * @rec: the record to parse
  *
  * This returns the PID from a record.
  */
-int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec)
+int tep_data_pid(struct tep_handle *tep, struct tep_record *rec)
 {
-       return parse_common_pid(pevent, rec->data);
+       return parse_common_pid(tep, rec->data);
 }
 
 /**
  * tep_data_preempt_count - parse the preempt count from the record
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
  * @rec: the record to parse
  *
  * This returns the preempt count from a record.
  */
-int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec)
+int tep_data_preempt_count(struct tep_handle *tep, struct tep_record *rec)
 {
-       return parse_common_pc(pevent, rec->data);
+       return parse_common_pc(tep, rec->data);
 }
 
 /**
  * tep_data_flags - parse the latency flags from the record
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
  * @rec: the record to parse
  *
  * This returns the latency flags from a record.
  *
  *  Use trace_flag_type enum for the flags (see event-parse.h).
  */
-int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec)
+int tep_data_flags(struct tep_handle *tep, struct tep_record *rec)
 {
-       return parse_common_flags(pevent, rec->data);
+       return parse_common_flags(tep, rec->data);
 }
 
 /**
  * tep_data_comm_from_pid - return the command line from PID
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
  * @pid: the PID of the task to search for
  *
  * This returns a pointer to the command line that has the given
  * @pid.
  */
-const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid)
+const char *tep_data_comm_from_pid(struct tep_handle *tep, int pid)
 {
        const char *comm;
 
-       comm = find_cmdline(pevent, pid);
+       comm = find_cmdline(tep, pid);
        return comm;
 }
 
 static struct tep_cmdline *
-pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct tep_cmdline *next)
+pid_from_cmdlist(struct tep_handle *tep, const char *comm, struct tep_cmdline *next)
 {
        struct cmdline_list *cmdlist = (struct cmdline_list *)next;
 
        if (cmdlist)
                cmdlist = cmdlist->next;
        else
-               cmdlist = pevent->cmdlist;
+               cmdlist = tep->cmdlist;
 
        while (cmdlist && strcmp(cmdlist->comm, comm) != 0)
                cmdlist = cmdlist->next;
@@ -5337,7 +5337,7 @@ pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct tep_cmdline
 
 /**
  * tep_data_pid_from_comm - return the pid from a given comm
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
  * @comm: the cmdline to find the pid from
  * @next: the cmdline structure to find the next comm
  *
@@ -5348,7 +5348,7 @@ pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct tep_cmdline
  * next pid.
  * Also, it does a linear search, so it may be slow.
  */
-struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
+struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *tep, const char *comm,
                                           struct tep_cmdline *next)
 {
        struct tep_cmdline *cmdline;
@@ -5357,25 +5357,25 @@ struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char
         * If the cmdlines have not been converted yet, then use
         * the list.
         */
-       if (!pevent->cmdlines)
-               return pid_from_cmdlist(pevent, comm, next);
+       if (!tep->cmdlines)
+               return pid_from_cmdlist(tep, comm, next);
 
        if (next) {
                /*
                 * The next pointer could still be from
                 * a previous call before cmdlines were created
                 */
-               if (next < pevent->cmdlines ||
-                   next >= pevent->cmdlines + pevent->cmdline_count)
+               if (next < tep->cmdlines ||
+                   next >= tep->cmdlines + tep->cmdline_count)
                        next = NULL;
                else
                        cmdline  = next++;
        }
 
        if (!next)
-               cmdline = pevent->cmdlines;
+               cmdline = tep->cmdlines;
 
-       while (cmdline < pevent->cmdlines + pevent->cmdline_count) {
+       while (cmdline < tep->cmdlines + tep->cmdline_count) {
                if (strcmp(cmdline->comm, comm) == 0)
                        return cmdline;
                cmdline++;
@@ -5385,12 +5385,13 @@ struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char
 
 /**
  * tep_cmdline_pid - return the pid associated to a given cmdline
+ * @tep: a handle to the trace event parser context
  * @cmdline: The cmdline structure to get the pid from
  *
  * Returns the pid for a given cmdline. If @cmdline is NULL, then
  * -1 is returned.
  */
-int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline)
+int tep_cmdline_pid(struct tep_handle *tep, struct tep_cmdline *cmdline)
 {
        struct cmdline_list *cmdlist = (struct cmdline_list *)cmdline;
 
@@ -5401,9 +5402,9 @@ int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline)
         * If cmdlines have not been created yet, or cmdline is
         * not part of the array, then treat it as a cmdlist instead.
         */
-       if (!pevent->cmdlines ||
-           cmdline < pevent->cmdlines ||
-           cmdline >= pevent->cmdlines + pevent->cmdline_count)
+       if (!tep->cmdlines ||
+           cmdline < tep->cmdlines ||
+           cmdline >= tep->cmdlines + tep->cmdline_count)
                return cmdlist->pid;
 
        return cmdline->pid;
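
For illustration, a minimal sketch of how the renamed comm/pid lookups chain together; the helper name is hypothetical, error handling is elided, and a populated tep handle is assumed:

	#include <stdio.h>
	#include "event-parse.h"

	/* Hypothetical helper: print every PID recorded for a given comm by
	 * feeding each returned cmdline back in as @next. */
	static void print_pids_for_comm(struct tep_handle *tep, const char *comm)
	{
		struct tep_cmdline *cmdline = NULL;

		while ((cmdline = tep_data_pid_from_comm(tep, comm, cmdline)))
			printf("%s -> pid %d\n", comm,
			       tep_cmdline_pid(tep, cmdline));
	}
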
@@ -5423,7 +5424,7 @@ void tep_event_info(struct trace_seq *s, struct tep_event *event,
 {
        int print_pretty = 1;
 
-       if (event->pevent->print_raw || (event->flags & TEP_EVENT_FL_PRINTRAW))
+       if (event->tep->print_raw || (event->flags & TEP_EVENT_FL_PRINTRAW))
                tep_print_fields(s, record->data, record->size, event);
        else {
 
@@ -5444,7 +5445,8 @@ static bool is_timestamp_in_us(char *trace_clock, bool use_trace_clock)
                return true;
 
        if (!strcmp(trace_clock, "local") || !strcmp(trace_clock, "global")
-           || !strcmp(trace_clock, "uptime") || !strcmp(trace_clock, "perf"))
+           || !strcmp(trace_clock, "uptime") || !strcmp(trace_clock, "perf")
+           || !strncmp(trace_clock, "mono", 4))
                return true;
 
        /* trace_clock is set in tsc or counter mode */
@@ -5453,14 +5455,14 @@ static bool is_timestamp_in_us(char *trace_clock, bool use_trace_clock)
 
 /**
  * tep_find_event_by_record - return the event from a given record
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
  * @record: The record to get the event from
  *
  * Returns the associated event for a given record, or NULL if none
  * is found.
  */
 struct tep_event *
-tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record)
+tep_find_event_by_record(struct tep_handle *tep, struct tep_record *record)
 {
        int type;
 
@@ -5469,21 +5471,21 @@ tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record)
                return NULL;
        }
 
-       type = trace_parse_common_type(pevent, record->data);
+       type = trace_parse_common_type(tep, record->data);
 
-       return tep_find_event(pevent, type);
+       return tep_find_event(tep, type);
 }
 
 /**
  * tep_print_event_task - Write the event task comm, pid and CPU
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
  * @s: the trace_seq to write to
  * @event: the handle to the record's event
  * @record: The record to get the event from
  *
  * Writes the task's comm, pid and CPU to @s.
  */
-void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event_task(struct tep_handle *tep, struct trace_seq *s,
                          struct tep_event *event,
                          struct tep_record *record)
 {
@@ -5491,27 +5493,26 @@ void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s,
        const char *comm;
        int pid;
 
-       pid = parse_common_pid(pevent, data);
-       comm = find_cmdline(pevent, pid);
+       pid = parse_common_pid(tep, data);
+       comm = find_cmdline(tep, pid);
 
-       if (pevent->latency_format) {
-               trace_seq_printf(s, "%8.8s-%-5d %3d",
-                      comm, pid, record->cpu);
-       } else
+       if (tep->latency_format)
+               trace_seq_printf(s, "%8.8s-%-5d %3d", comm, pid, record->cpu);
+       else
                trace_seq_printf(s, "%16s-%-5d [%03d]", comm, pid, record->cpu);
 }
 
 /**
  * tep_print_event_time - Write the event timestamp
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
  * @s: the trace_seq to write to
  * @event: the handle to the record's event
  * @record: The record to get the event from
- * @use_trace_clock: Set to parse according to the @pevent->trace_clock
+ * @use_trace_clock: Set to parse according to the @tep->trace_clock
  *
  * Writes the timestamp of the record into @s.
  */
-void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event_time(struct tep_handle *tep, struct trace_seq *s,
                          struct tep_event *event,
                          struct tep_record *record,
                          bool use_trace_clock)
@@ -5522,19 +5523,18 @@ void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
        int p;
        bool use_usec_format;
 
-       use_usec_format = is_timestamp_in_us(pevent->trace_clock,
-                                                       use_trace_clock);
+       use_usec_format = is_timestamp_in_us(tep->trace_clock, use_trace_clock);
        if (use_usec_format) {
                secs = record->ts / NSEC_PER_SEC;
                nsecs = record->ts - secs * NSEC_PER_SEC;
        }
 
-       if (pevent->latency_format) {
-               tep_data_lat_fmt(pevent, s, record);
+       if (tep->latency_format) {
+               tep_data_latency_format(tep, s, record);
        }
 
        if (use_usec_format) {
-               if (pevent->flags & TEP_NSEC_OUTPUT) {
+               if (tep->flags & TEP_NSEC_OUTPUT) {
                        usecs = nsecs;
                        p = 9;
                } else {
@@ -5554,14 +5554,14 @@ void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
 
 /**
  * tep_print_event_data - Write the event data section
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
  * @s: the trace_seq to write to
  * @event: the handle to the record's event
  * @record: The record to get the event from
  *
  * Writes the parsing of the record's data to @s.
  */
-void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event_data(struct tep_handle *tep, struct trace_seq *s,
                          struct tep_event *event,
                          struct tep_record *record)
 {
@@ -5578,15 +5578,15 @@ void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s,
        tep_event_info(s, event, record);
 }
 
-void tep_print_event(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event(struct tep_handle *tep, struct trace_seq *s,
                     struct tep_record *record, bool use_trace_clock)
 {
        struct tep_event *event;
 
-       event = tep_find_event_by_record(pevent, record);
+       event = tep_find_event_by_record(tep, record);
        if (!event) {
                int i;
-               int type = trace_parse_common_type(pevent, record->data);
+               int type = trace_parse_common_type(tep, record->data);
 
                do_warning("ug! no event found for type %d", type);
                trace_seq_printf(s, "[UNKNOWN TYPE %d]", type);
@@ -5596,9 +5596,9 @@ void tep_print_event(struct tep_handle *pevent, struct trace_seq *s,
                return;
        }
 
-       tep_print_event_task(pevent, s, event, record);
-       tep_print_event_time(pevent, s, event, record, use_trace_clock);
-       tep_print_event_data(pevent, s, event, record);
+       tep_print_event_task(tep, s, event, record);
+       tep_print_event_time(tep, s, event, record, use_trace_clock);
+       tep_print_event_data(tep, s, event, record);
 }
 
 static int events_id_cmp(const void *a, const void *b)
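
For illustration, a minimal sketch of the renamed top-level print path (assumes tep and record come from the caller's tracing setup; the trace_seq helpers live in trace-seq.h):

	#include <stdbool.h>
	#include "event-parse.h"
	#include "trace-seq.h"

	/* Hypothetical helper: render one record through the renamed API. */
	static void show_record(struct tep_handle *tep, struct tep_record *record)
	{
		struct trace_seq s;

		trace_seq_init(&s);
		tep_print_event(tep, &s, record, true /* use_trace_clock */);
		trace_seq_do_printf(&s);
		trace_seq_destroy(&s);
	}
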
@@ -5649,32 +5649,26 @@ static int events_system_cmp(const void *a, const void *b)
        return events_id_cmp(a, b);
 }
 
-struct tep_event **tep_list_events(struct tep_handle *pevent, enum tep_event_sort_type sort_type)
+static struct tep_event **list_events_copy(struct tep_handle *tep)
 {
        struct tep_event **events;
-       int (*sort)(const void *a, const void *b);
-
-       events = pevent->sort_events;
-
-       if (events && pevent->last_type == sort_type)
-               return events;
 
-       if (!events) {
-               events = malloc(sizeof(*events) * (pevent->nr_events + 1));
-               if (!events)
-                       return NULL;
+       if (!tep)
+               return NULL;
 
-               memcpy(events, pevent->events, sizeof(*events) * pevent->nr_events);
-               events[pevent->nr_events] = NULL;
+       events = malloc(sizeof(*events) * (tep->nr_events + 1));
+       if (!events)
+               return NULL;
 
-               pevent->sort_events = events;
+       memcpy(events, tep->events, sizeof(*events) * tep->nr_events);
+       events[tep->nr_events] = NULL;
+       return events;
+}
 
-               /* the internal events are sorted by id */
-               if (sort_type == TEP_EVENT_SORT_ID) {
-                       pevent->last_type = sort_type;
-                       return events;
-               }
-       }
+static void list_events_sort(struct tep_event **events, int nr_events,
+                            enum tep_event_sort_type sort_type)
+{
+       int (*sort)(const void *a, const void *b);
 
        switch (sort_type) {
        case TEP_EVENT_SORT_ID:
@@ -5687,11 +5681,82 @@ struct tep_event **tep_list_events(struct tep_handle *pevent, enum tep_event_sor
                sort = events_system_cmp;
                break;
        default:
+               sort = NULL;
+       }
+
+       if (sort)
+               qsort(events, nr_events, sizeof(*events), sort);
+}
+
+/**
+ * tep_list_events - Get events, sorted by given criteria.
+ * @tep: a handle to the tep context
+ * @sort_type: desired sort order of the events in the array
+ *
+ * Returns an array of pointers to all events, sorted by the given
+ * @sort_type criteria. The last element of the array is NULL. The returned
+ * memory must not be freed; it is managed by the library.
+ * The function is not thread safe.
+ */
+struct tep_event **tep_list_events(struct tep_handle *tep,
+                                  enum tep_event_sort_type sort_type)
+{
+       struct tep_event **events;
+
+       if (!tep)
+               return NULL;
+
+       events = tep->sort_events;
+       if (events && tep->last_type == sort_type)
                return events;
+
+       if (!events) {
+               events = list_events_copy(tep);
+               if (!events)
+                       return NULL;
+
+               tep->sort_events = events;
+
+               /* the internal events are sorted by id */
+               if (sort_type == TEP_EVENT_SORT_ID) {
+                       tep->last_type = sort_type;
+                       return events;
+               }
        }
 
-       qsort(events, pevent->nr_events, sizeof(*events), sort);
-       pevent->last_type = sort_type;
+       list_events_sort(events, tep->nr_events, sort_type);
+       tep->last_type = sort_type;
+
+       return events;
+}
+
+/**
+ * tep_list_events_copy - Thread safe version of tep_list_events()
+ * @tep: a handle to the tep context
+ * @sort_type: desired sort order of the events in the array
+ *
+ * Returns an array of pointers to all events, sorted by the given
+ * @sort_type criteria. The last element of the array is NULL. The returned
+ * array is newly allocated inside the function and must be freed by the caller.
+ */
+struct tep_event **tep_list_events_copy(struct tep_handle *tep,
+                                       enum tep_event_sort_type sort_type)
+{
+       struct tep_event **events;
+
+       if (!tep)
+               return NULL;
+
+       events = list_events_copy(tep);
+       if (!events)
+               return NULL;
+
+       /* the internal events are sorted by id */
+       if (sort_type == TEP_EVENT_SORT_ID)
+               return events;
+
+       list_events_sort(events, tep->nr_events, sort_type);
 
        return events;
 }
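
As the kerneldoc above notes, the copy variant differs from tep_list_events() only in ownership and thread safety; a minimal usage sketch (the helper name is illustrative):

	#include <stdio.h>
	#include <stdlib.h>
	#include "event-parse.h"

	static void dump_events_sorted(struct tep_handle *tep)
	{
		struct tep_event **events;
		int i;

		events = tep_list_events_copy(tep, TEP_EVENT_SORT_NAME);
		if (!events)
			return;
		for (i = 0; events[i]; i++)
			printf("%s:%s\n", events[i]->system, events[i]->name);
		free(events);	/* the array (not the events) is caller-owned */
	}
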
@@ -5950,7 +6015,7 @@ static void parse_header_field(const char *field,
 
 /**
  * tep_parse_header_page - parse the data stored in the header page
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
  * @buf: the buffer storing the header page format string
  * @size: the size of @buf
  * @long_size: the long size to use if there is no header
@@ -5960,7 +6025,7 @@ static void parse_header_field(const char *field,
  *
  * /sys/kernel/debug/tracing/events/header_page
  */
-int tep_parse_header_page(struct tep_handle *pevent, char *buf, unsigned long size,
+int tep_parse_header_page(struct tep_handle *tep, char *buf, unsigned long size,
                          int long_size)
 {
        int ignore;
@@ -5970,22 +6035,22 @@ int tep_parse_header_page(struct tep_handle *pevent, char *buf, unsigned long si
                 * Old kernels did not have header page info.
                 * Sorry but we just use what we find here in user space.
                 */
-               pevent->header_page_ts_size = sizeof(long long);
-               pevent->header_page_size_size = long_size;
-               pevent->header_page_data_offset = sizeof(long long) + long_size;
-               pevent->old_format = 1;
+               tep->header_page_ts_size = sizeof(long long);
+               tep->header_page_size_size = long_size;
+               tep->header_page_data_offset = sizeof(long long) + long_size;
+               tep->old_format = 1;
                return -1;
        }
        init_input_buf(buf, size);
 
-       parse_header_field("timestamp", &pevent->header_page_ts_offset,
-                          &pevent->header_page_ts_size, 1);
-       parse_header_field("commit", &pevent->header_page_size_offset,
-                          &pevent->header_page_size_size, 1);
-       parse_header_field("overwrite", &pevent->header_page_overwrite,
+       parse_header_field("timestamp", &tep->header_page_ts_offset,
+                          &tep->header_page_ts_size, 1);
+       parse_header_field("commit", &tep->header_page_size_offset,
+                          &tep->header_page_size_size, 1);
+       parse_header_field("overwrite", &tep->header_page_overwrite,
                           &ignore, 0);
-       parse_header_field("data", &pevent->header_page_data_offset,
-                          &pevent->header_page_data_size, 1);
+       parse_header_field("data", &tep->header_page_data_offset,
+                          &tep->header_page_data_size, 1);
 
        return 0;
 }
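
For illustration, a minimal sketch of wiring the header page format into the parser; buf and size are assumed to hold the contents of /sys/kernel/debug/tracing/events/header_page:

	#include "event-parse.h"

	/* Hypothetical helper: -1 from tep_parse_header_page() means an old
	 * kernel without header page info, handled with built-in defaults. */
	static int setup_header(struct tep_handle *tep, char *buf,
				unsigned long size)
	{
		return tep_parse_header_page(tep, buf, size, sizeof(long));
	}
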
@@ -6013,11 +6078,11 @@ static void free_handler(struct event_handler *handle)
        free(handle);
 }
 
-static int find_event_handle(struct tep_handle *pevent, struct tep_event *event)
+static int find_event_handle(struct tep_handle *tep, struct tep_event *event)
 {
        struct event_handler *handle, **next;
 
-       for (next = &pevent->handlers; *next;
+       for (next = &tep->handlers; *next;
             next = &(*next)->next) {
                handle = *next;
                if (event_matches(event, handle->id,
@@ -6055,7 +6120,7 @@ static int find_event_handle(struct tep_handle *pevent, struct tep_event *event)
  * /sys/kernel/debug/tracing/events/.../.../format
  */
 enum tep_errno __tep_parse_format(struct tep_event **eventp,
-                                 struct tep_handle *pevent, const char *buf,
+                                 struct tep_handle *tep, const char *buf,
                                  unsigned long size, const char *sys)
 {
        struct tep_event *event;
@@ -6097,8 +6162,8 @@ enum tep_errno __tep_parse_format(struct tep_event **eventp,
                goto event_alloc_failed;
        }
 
-       /* Add pevent to event so that it can be referenced */
-       event->pevent = pevent;
+       /* Add tep to event so that it can be referenced */
+       event->tep = tep;
 
        ret = event_read_format(event);
        if (ret < 0) {
@@ -6110,7 +6175,7 @@ enum tep_errno __tep_parse_format(struct tep_event **eventp,
         * If the event has an override, don't print warnings if the event
         * print format fails to parse.
         */
-       if (pevent && find_event_handle(pevent, event))
+       if (tep && find_event_handle(tep, event))
                show_warning = 0;
 
        ret = event_read_print(event);
@@ -6162,18 +6227,18 @@ enum tep_errno __tep_parse_format(struct tep_event **eventp,
 }
 
 static enum tep_errno
-__parse_event(struct tep_handle *pevent,
+__parse_event(struct tep_handle *tep,
              struct tep_event **eventp,
              const char *buf, unsigned long size,
              const char *sys)
 {
-       int ret = __tep_parse_format(eventp, pevent, buf, size, sys);
+       int ret = __tep_parse_format(eventp, tep, buf, size, sys);
        struct tep_event *event = *eventp;
 
        if (event == NULL)
                return ret;
 
-       if (pevent && add_event(pevent, event)) {
+       if (tep && add_event(tep, event)) {
                ret = TEP_ERRNO__MEM_ALLOC_FAILED;
                goto event_add_failed;
        }
@@ -6191,7 +6256,7 @@ event_add_failed:
 
 /**
  * tep_parse_format - parse the event format
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
  * @eventp: returned format
  * @buf: the buffer storing the event format string
  * @size: the size of @buf
@@ -6204,17 +6269,17 @@ event_add_failed:
  *
  * /sys/kernel/debug/tracing/events/.../.../format
  */
-enum tep_errno tep_parse_format(struct tep_handle *pevent,
+enum tep_errno tep_parse_format(struct tep_handle *tep,
                                struct tep_event **eventp,
                                const char *buf,
                                unsigned long size, const char *sys)
 {
-       return __parse_event(pevent, eventp, buf, size, sys);
+       return __parse_event(tep, eventp, buf, size, sys);
 }
 
 /**
  * tep_parse_event - parse the event format
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
  * @buf: the buffer storing the event format string
  * @size: the size of @buf
  * @sys: the system the event belongs to
@@ -6226,11 +6291,11 @@ enum tep_errno tep_parse_format(struct tep_handle *pevent,
  *
  * /sys/kernel/debug/tracing/events/.../.../format
  */
-enum tep_errno tep_parse_event(struct tep_handle *pevent, const char *buf,
+enum tep_errno tep_parse_event(struct tep_handle *tep, const char *buf,
                               unsigned long size, const char *sys)
 {
        struct tep_event *event = NULL;
-       return __parse_event(pevent, &event, buf, size, sys);
+       return __parse_event(tep, &event, buf, size, sys);
 }
 
 int get_field_val(struct trace_seq *s, struct tep_format_field *field,
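
For illustration, a minimal sketch of feeding one format file through the renamed parse entry point; the helper name is hypothetical and buf/size would hold e.g. the contents of .../events/sched/sched_switch/format:

	#include <stdio.h>
	#include "event-parse.h"

	static int load_format(struct tep_handle *tep, char *buf,
			       unsigned long size)
	{
		enum tep_errno err = tep_parse_event(tep, buf, size, "sched");

		if (err) {
			char msg[256];

			tep_strerror(tep, err, msg, sizeof(msg));
			fprintf(stderr, "parse failed: %s\n", msg);
		}
		return err;
	}
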
@@ -6292,8 +6357,8 @@ void *tep_get_field_raw(struct trace_seq *s, struct tep_event *event,
 
        offset = field->offset;
        if (field->flags & TEP_FIELD_IS_DYNAMIC) {
-               offset = tep_read_number(event->pevent,
-                                           data + offset, field->size);
+               offset = tep_read_number(event->tep,
+                                        data + offset, field->size);
                *len = offset >> 16;
                offset &= 0xffff;
        } else
@@ -6386,7 +6451,8 @@ int tep_get_any_field_val(struct trace_seq *s, struct tep_event *event,
  * @record: The record with the field name.
  * @err: print default error if failed.
  *
- * Returns: 0 on success, -1 field not found, or 1 if buffer is full.
+ * Returns a positive value on success, a negative value in case of an error,
+ * or 0 if the buffer is full.
  */
 int tep_print_num_field(struct trace_seq *s, const char *fmt,
                        struct tep_event *event, const char *name,
@@ -6418,14 +6484,15 @@ int tep_print_num_field(struct trace_seq *s, const char *fmt,
  * @record: The record with the field name.
  * @err: print default error if failed.
  *
- * Returns: 0 on success, -1 field not found, or 1 if buffer is full.
+ * Returns a positive value on success, a negative value in case of an error,
+ * or 0 if the buffer is full.
  */
 int tep_print_func_field(struct trace_seq *s, const char *fmt,
                         struct tep_event *event, const char *name,
                         struct tep_record *record, int err)
 {
        struct tep_format_field *field = tep_find_field(event, name);
-       struct tep_handle *pevent = event->pevent;
+       struct tep_handle *tep = event->tep;
        unsigned long long val;
        struct func_map *func;
        char tmp[128];
@@ -6436,7 +6503,7 @@ int tep_print_func_field(struct trace_seq *s, const char *fmt,
        if (tep_read_number_field(field, record->data, &val))
                goto failed;
 
-       func = find_func(pevent, val);
+       func = find_func(tep, val);
 
        if (func)
                snprintf(tmp, 128, "%s/0x%llx", func->func, func->addr - val);
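
Given the corrected return convention documented above, a caller now distinguishes buffer-full (0) from errors (negative); a minimal sketch using the common_pid field:

	#include <stdio.h>
	#include "event-parse.h"
	#include "trace-seq.h"

	/* Positive: field written; 0: trace_seq buffer full; negative: error
	 * such as the field not being found. */
	static void print_pid(struct trace_seq *s, struct tep_event *event,
			      struct tep_record *record)
	{
		if (tep_print_num_field(s, " pid=%d", event, "common_pid",
					record, 1) <= 0)
			fprintf(stderr, "could not print common_pid\n");
	}
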
@@ -6468,7 +6535,7 @@ static void free_func_handle(struct tep_function_handler *func)
 
 /**
  * tep_register_print_function - register a helper function
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
  * @func: the function to process the helper function
  * @ret_type: the return type of the helper function
  * @name: the name of the helper function
@@ -6481,7 +6548,7 @@ static void free_func_handle(struct tep_function_handler *func)
  * The @parameters is a variable list of tep_func_arg_type enums that
  * must end with TEP_FUNC_ARG_VOID.
  */
-int tep_register_print_function(struct tep_handle *pevent,
+int tep_register_print_function(struct tep_handle *tep,
                                tep_func_handler func,
                                enum tep_func_arg_type ret_type,
                                char *name, ...)
@@ -6493,7 +6560,7 @@ int tep_register_print_function(struct tep_handle *pevent,
        va_list ap;
        int ret;
 
-       func_handle = find_func_handler(pevent, name);
+       func_handle = find_func_handler(tep, name);
        if (func_handle) {
                /*
                 * This is most likely caused by the user's own
@@ -6501,7 +6568,7 @@ int tep_register_print_function(struct tep_handle *pevent,
                 * system defaults.
                 */
                pr_stat("override of function helper '%s'", name);
-               remove_func_handler(pevent, name);
+               remove_func_handler(tep, name);
        }
 
        func_handle = calloc(1, sizeof(*func_handle));
@@ -6548,8 +6615,8 @@ int tep_register_print_function(struct tep_handle *pevent,
        }
        va_end(ap);
 
-       func_handle->next = pevent->func_handlers;
-       pevent->func_handlers = func_handle;
+       func_handle->next = tep->func_handlers;
+       tep->func_handlers = func_handle;
 
        return 0;
  out_free:
@@ -6560,7 +6627,7 @@ int tep_register_print_function(struct tep_handle *pevent,
 
 /**
  * tep_unregister_print_function - unregister a helper function
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
  * @func: the function to process the helper function
  * @name: the name of the helper function
  *
@@ -6568,20 +6635,20 @@ int tep_register_print_function(struct tep_handle *pevent,
  *
  * Returns 0 if the handler was removed successfully, -1 otherwise.
  */
-int tep_unregister_print_function(struct tep_handle *pevent,
+int tep_unregister_print_function(struct tep_handle *tep,
                                  tep_func_handler func, char *name)
 {
        struct tep_function_handler *func_handle;
 
-       func_handle = find_func_handler(pevent, name);
+       func_handle = find_func_handler(tep, name);
        if (func_handle && func_handle->func == func) {
-               remove_func_handler(pevent, name);
+               remove_func_handler(tep, name);
                return 0;
        }
        return -1;
 }
 
-static struct tep_event *search_event(struct tep_handle *pevent, int id,
+static struct tep_event *search_event(struct tep_handle *tep, int id,
                                      const char *sys_name,
                                      const char *event_name)
 {
@@ -6589,7 +6656,7 @@ static struct tep_event *search_event(struct tep_handle *pevent, int id,
 
        if (id >= 0) {
                /* search by id */
-               event = tep_find_event(pevent, id);
+               event = tep_find_event(tep, id);
                if (!event)
                        return NULL;
                if (event_name && (strcmp(event_name, event->name) != 0))
@@ -6597,7 +6664,7 @@ static struct tep_event *search_event(struct tep_handle *pevent, int id,
                if (sys_name && (strcmp(sys_name, event->system) != 0))
                        return NULL;
        } else {
-               event = tep_find_event_by_name(pevent, sys_name, event_name);
+               event = tep_find_event_by_name(tep, sys_name, event_name);
                if (!event)
                        return NULL;
        }
@@ -6606,7 +6673,7 @@ static struct tep_event *search_event(struct tep_handle *pevent, int id,
 
 /**
  * tep_register_event_handler - register a way to parse an event
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
  * @id: the id of the event to register
  * @sys_name: the system name the event belongs to
  * @event_name: the name of the event
@@ -6627,14 +6694,14 @@ static struct tep_event *search_event(struct tep_handle *pevent, int id,
  *  negative TEP_ERRNO_... in case of an error
  *
  */
-int tep_register_event_handler(struct tep_handle *pevent, int id,
+int tep_register_event_handler(struct tep_handle *tep, int id,
                               const char *sys_name, const char *event_name,
                               tep_event_handler_func func, void *context)
 {
        struct tep_event *event;
        struct event_handler *handle;
 
-       event = search_event(pevent, id, sys_name, event_name);
+       event = search_event(tep, id, sys_name, event_name);
        if (event == NULL)
                goto not_found;
 
@@ -6669,8 +6736,8 @@ int tep_register_event_handler(struct tep_handle *pevent, int id,
        }
 
        handle->func = func;
-       handle->next = pevent->handlers;
-       pevent->handlers = handle;
+       handle->next = tep->handlers;
+       tep->handlers = handle;
        handle->context = context;
 
        return TEP_REGISTER_SUCCESS;
@@ -6697,7 +6764,7 @@ static int handle_matches(struct event_handler *handler, int id,
 
 /**
  * tep_unregister_event_handler - unregister an existing event handler
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
  * @id: the id of the event to unregister
  * @sys_name: the system name the handler belongs to
  * @event_name: the name of the event handler
@@ -6711,7 +6778,7 @@ static int handle_matches(struct event_handler *handler, int id,
  *
  * Returns 0 if handler was removed successfully, -1 if event was not found.
  */
-int tep_unregister_event_handler(struct tep_handle *pevent, int id,
+int tep_unregister_event_handler(struct tep_handle *tep, int id,
                                 const char *sys_name, const char *event_name,
                                 tep_event_handler_func func, void *context)
 {
@@ -6719,7 +6786,7 @@ int tep_unregister_event_handler(struct tep_handle *pevent, int id,
        struct event_handler *handle;
        struct event_handler **next;
 
-       event = search_event(pevent, id, sys_name, event_name);
+       event = search_event(tep, id, sys_name, event_name);
        if (event == NULL)
                goto not_found;
 
@@ -6733,7 +6800,7 @@ int tep_unregister_event_handler(struct tep_handle *pevent, int id,
        }
 
 not_found:
-       for (next = &pevent->handlers; *next; next = &(*next)->next) {
+       for (next = &tep->handlers; *next; next = &(*next)->next) {
                handle = *next;
                if (handle_matches(handle, id, sys_name, event_name,
                                   func, context))
@@ -6750,23 +6817,23 @@ not_found:
 }
 
 /**
- * tep_alloc - create a pevent handle
+ * tep_alloc - create a tep handle
  */
 struct tep_handle *tep_alloc(void)
 {
-       struct tep_handle *pevent = calloc(1, sizeof(*pevent));
+       struct tep_handle *tep = calloc(1, sizeof(*tep));
 
-       if (pevent) {
-               pevent->ref_count = 1;
-               pevent->host_bigendian = tep_host_bigendian();
+       if (tep) {
+               tep->ref_count = 1;
+               tep->host_bigendian = tep_is_bigendian();
        }
 
-       return pevent;
+       return tep;
 }
 
-void tep_ref(struct tep_handle *pevent)
+void tep_ref(struct tep_handle *tep)
 {
-       pevent->ref_count++;
+       tep->ref_count++;
 }
 
 int tep_get_ref(struct tep_handle *tep)
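
For illustration, the handle is reference counted; a minimal lifecycle sketch:

	#include "event-parse.h"

	static void handle_lifecycle(void)
	{
		struct tep_handle *tep = tep_alloc();	/* ref_count == 1 */

		if (!tep)
			return;
		tep_ref(tep);		/* ref_count == 2 */
		tep_unref(tep);		/* back to 1 */
		tep_free(tep);		/* count reaches 0, handle is released */
	}
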
@@ -6816,10 +6883,10 @@ void tep_free_event(struct tep_event *event)
 }
 
 /**
- * tep_free - free a pevent handle
- * @pevent: the pevent handle to free
+ * tep_free - free a tep handle
+ * @tep: the tep handle to free
  */
-void tep_free(struct tep_handle *pevent)
+void tep_free(struct tep_handle *tep)
 {
        struct cmdline_list *cmdlist, *cmdnext;
        struct func_list *funclist, *funcnext;
@@ -6828,21 +6895,21 @@ void tep_free(struct tep_handle *pevent)
        struct event_handler *handle;
        int i;
 
-       if (!pevent)
+       if (!tep)
                return;
 
-       cmdlist = pevent->cmdlist;
-       funclist = pevent->funclist;
-       printklist = pevent->printklist;
+       cmdlist = tep->cmdlist;
+       funclist = tep->funclist;
+       printklist = tep->printklist;
 
-       pevent->ref_count--;
-       if (pevent->ref_count)
+       tep->ref_count--;
+       if (tep->ref_count)
                return;
 
-       if (pevent->cmdlines) {
-               for (i = 0; i < pevent->cmdline_count; i++)
-                       free(pevent->cmdlines[i].comm);
-               free(pevent->cmdlines);
+       if (tep->cmdlines) {
+               for (i = 0; i < tep->cmdline_count; i++)
+                       free(tep->cmdlines[i].comm);
+               free(tep->cmdlines);
        }
 
        while (cmdlist) {
@@ -6852,12 +6919,12 @@ void tep_free(struct tep_handle *pevent)
                cmdlist = cmdnext;
        }
 
-       if (pevent->func_map) {
-               for (i = 0; i < (int)pevent->func_count; i++) {
-                       free(pevent->func_map[i].func);
-                       free(pevent->func_map[i].mod);
+       if (tep->func_map) {
+               for (i = 0; i < (int)tep->func_count; i++) {
+                       free(tep->func_map[i].func);
+                       free(tep->func_map[i].mod);
                }
-               free(pevent->func_map);
+               free(tep->func_map);
        }
 
        while (funclist) {
@@ -6868,16 +6935,16 @@ void tep_free(struct tep_handle *pevent)
                funclist = funcnext;
        }
 
-       while (pevent->func_handlers) {
-               func_handler = pevent->func_handlers;
-               pevent->func_handlers = func_handler->next;
+       while (tep->func_handlers) {
+               func_handler = tep->func_handlers;
+               tep->func_handlers = func_handler->next;
                free_func_handle(func_handler);
        }
 
-       if (pevent->printk_map) {
-               for (i = 0; i < (int)pevent->printk_count; i++)
-                       free(pevent->printk_map[i].printk);
-               free(pevent->printk_map);
+       if (tep->printk_map) {
+               for (i = 0; i < (int)tep->printk_count; i++)
+                       free(tep->printk_map[i].printk);
+               free(tep->printk_map);
        }
 
        while (printklist) {
@@ -6887,24 +6954,24 @@ void tep_free(struct tep_handle *pevent)
                printklist = printknext;
        }
 
-       for (i = 0; i < pevent->nr_events; i++)
-               tep_free_event(pevent->events[i]);
+       for (i = 0; i < tep->nr_events; i++)
+               tep_free_event(tep->events[i]);
 
-       while (pevent->handlers) {
-               handle = pevent->handlers;
-               pevent->handlers = handle->next;
+       while (tep->handlers) {
+               handle = tep->handlers;
+               tep->handlers = handle->next;
                free_handler(handle);
        }
 
-       free(pevent->trace_clock);
-       free(pevent->events);
-       free(pevent->sort_events);
-       free(pevent->func_resolver);
+       free(tep->trace_clock);
+       free(tep->events);
+       free(tep->sort_events);
+       free(tep->func_resolver);
 
-       free(pevent);
+       free(tep);
 }
 
-void tep_unref(struct tep_handle *pevent)
+void tep_unref(struct tep_handle *tep)
 {
-       tep_free(pevent);
+       tep_free(tep);
 }
index aec48f2aea8af6647225ef622c22a5efa6e26e6f..642f68ab5fb2bc7e8704bf70d44a1eb1a86c7273 100644 (file)
@@ -64,8 +64,8 @@ typedef int (*tep_event_handler_func)(struct trace_seq *s,
                                      struct tep_event *event,
                                      void *context);
 
-typedef int (*tep_plugin_load_func)(struct tep_handle *pevent);
-typedef int (*tep_plugin_unload_func)(struct tep_handle *pevent);
+typedef int (*tep_plugin_load_func)(struct tep_handle *tep);
+typedef int (*tep_plugin_unload_func)(struct tep_handle *tep);
 
 struct tep_plugin_option {
        struct tep_plugin_option        *next;
@@ -85,12 +85,12 @@ struct tep_plugin_option {
  * TEP_PLUGIN_LOADER:  (required)
  *   The function name to initialize the plugin.
  *
- *   int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+ *   int TEP_PLUGIN_LOADER(struct tep_handle *tep)
  *
  * TEP_PLUGIN_UNLOADER:  (optional)
  *   The function called just before unloading
  *
- *   int TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+ *   int TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
  *
  * TEP_PLUGIN_OPTIONS:  (optional)
  *   Plugin options that can be set before loading
@@ -278,7 +278,7 @@ struct tep_print_fmt {
 };
 
 struct tep_event {
-       struct tep_handle       *pevent;
+       struct tep_handle       *tep;
        char                    *name;
        int                     id;
        int                     flags;
@@ -393,9 +393,9 @@ struct tep_plugin_list;
 
 #define INVALID_PLUGIN_LIST_OPTION     ((char **)((unsigned long)-1))
 
-struct tep_plugin_list *tep_load_plugins(struct tep_handle *pevent);
+struct tep_plugin_list *tep_load_plugins(struct tep_handle *tep);
 void tep_unload_plugins(struct tep_plugin_list *plugin_list,
-                       struct tep_handle *pevent);
+                       struct tep_handle *tep);
 char **tep_plugin_list_options(void);
 void tep_plugin_free_options_list(char **list);
 int tep_plugin_add_options(const char *name,
@@ -409,8 +409,10 @@ void tep_print_plugins(struct trace_seq *s,
 typedef char *(tep_func_resolver_t)(void *priv,
                                    unsigned long long *addrp, char **modp);
 void tep_set_flag(struct tep_handle *tep, int flag);
+void tep_clear_flag(struct tep_handle *tep, enum tep_flag flag);
+bool tep_test_flag(struct tep_handle *tep, enum tep_flag flags);
 
-static inline int tep_host_bigendian(void)
+static inline int tep_is_bigendian(void)
 {
        unsigned char str[] = { 0x1, 0x2, 0x3, 0x4 };
        unsigned int val;
@@ -428,37 +430,37 @@ enum trace_flag_type {
        TRACE_FLAG_SOFTIRQ              = 0x10,
 };
 
-int tep_set_function_resolver(struct tep_handle *pevent,
+int tep_set_function_resolver(struct tep_handle *tep,
                              tep_func_resolver_t *func, void *priv);
-void tep_reset_function_resolver(struct tep_handle *pevent);
-int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid);
-int tep_override_comm(struct tep_handle *pevent, const char *comm, int pid);
-int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock);
-int tep_register_function(struct tep_handle *pevent, char *name,
+void tep_reset_function_resolver(struct tep_handle *tep);
+int tep_register_comm(struct tep_handle *tep, const char *comm, int pid);
+int tep_override_comm(struct tep_handle *tep, const char *comm, int pid);
+int tep_register_trace_clock(struct tep_handle *tep, const char *trace_clock);
+int tep_register_function(struct tep_handle *tep, char *name,
                          unsigned long long addr, char *mod);
-int tep_register_print_string(struct tep_handle *pevent, const char *fmt,
+int tep_register_print_string(struct tep_handle *tep, const char *fmt,
                              unsigned long long addr);
-int tep_pid_is_registered(struct tep_handle *pevent, int pid);
+bool tep_is_pid_registered(struct tep_handle *tep, int pid);
 
-void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event_task(struct tep_handle *tep, struct trace_seq *s,
                          struct tep_event *event,
                          struct tep_record *record);
-void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event_time(struct tep_handle *tep, struct trace_seq *s,
                          struct tep_event *event,
                          struct tep_record *record,
                          bool use_trace_clock);
-void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event_data(struct tep_handle *tep, struct trace_seq *s,
                          struct tep_event *event,
                          struct tep_record *record);
-void tep_print_event(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event(struct tep_handle *tep, struct trace_seq *s,
                     struct tep_record *record, bool use_trace_clock);
 
-int tep_parse_header_page(struct tep_handle *pevent, char *buf, unsigned long size,
+int tep_parse_header_page(struct tep_handle *tep, char *buf, unsigned long size,
                          int long_size);
 
-enum tep_errno tep_parse_event(struct tep_handle *pevent, const char *buf,
+enum tep_errno tep_parse_event(struct tep_handle *tep, const char *buf,
                               unsigned long size, const char *sys);
-enum tep_errno tep_parse_format(struct tep_handle *pevent,
+enum tep_errno tep_parse_format(struct tep_handle *tep,
                                struct tep_event **eventp,
                                const char *buf,
                                unsigned long size, const char *sys);
@@ -490,50 +492,50 @@ enum tep_reg_handler {
        TEP_REGISTER_SUCCESS_OVERWRITE,
 };
 
-int tep_register_event_handler(struct tep_handle *pevent, int id,
+int tep_register_event_handler(struct tep_handle *tep, int id,
                               const char *sys_name, const char *event_name,
                               tep_event_handler_func func, void *context);
-int tep_unregister_event_handler(struct tep_handle *pevent, int id,
+int tep_unregister_event_handler(struct tep_handle *tep, int id,
                                 const char *sys_name, const char *event_name,
                                 tep_event_handler_func func, void *context);
-int tep_register_print_function(struct tep_handle *pevent,
+int tep_register_print_function(struct tep_handle *tep,
                                tep_func_handler func,
                                enum tep_func_arg_type ret_type,
                                char *name, ...);
-int tep_unregister_print_function(struct tep_handle *pevent,
+int tep_unregister_print_function(struct tep_handle *tep,
                                  tep_func_handler func, char *name);
 
 struct tep_format_field *tep_find_common_field(struct tep_event *event, const char *name);
 struct tep_format_field *tep_find_field(struct tep_event *event, const char *name);
 struct tep_format_field *tep_find_any_field(struct tep_event *event, const char *name);
 
-const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr);
+const char *tep_find_function(struct tep_handle *tep, unsigned long long addr);
 unsigned long long
-tep_find_function_address(struct tep_handle *pevent, unsigned long long addr);
-unsigned long long tep_read_number(struct tep_handle *pevent, const void *ptr, int size);
+tep_find_function_address(struct tep_handle *tep, unsigned long long addr);
+unsigned long long tep_read_number(struct tep_handle *tep, const void *ptr, int size);
 int tep_read_number_field(struct tep_format_field *field, const void *data,
                          unsigned long long *value);
 
 struct tep_event *tep_get_first_event(struct tep_handle *tep);
 int tep_get_events_count(struct tep_handle *tep);
-struct tep_event *tep_find_event(struct tep_handle *pevent, int id);
+struct tep_event *tep_find_event(struct tep_handle *tep, int id);
 
 struct tep_event *
-tep_find_event_by_name(struct tep_handle *pevent, const char *sys, const char *name);
+tep_find_event_by_name(struct tep_handle *tep, const char *sys, const char *name);
 struct tep_event *
-tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record);
-
-void tep_data_lat_fmt(struct tep_handle *pevent,
-                     struct trace_seq *s, struct tep_record *record);
-int tep_data_type(struct tep_handle *pevent, struct tep_record *rec);
-int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec);
-int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec);
-int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec);
-const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid);
+tep_find_event_by_record(struct tep_handle *tep, struct tep_record *record);
+
+void tep_data_latency_format(struct tep_handle *tep,
+                            struct trace_seq *s, struct tep_record *record);
+int tep_data_type(struct tep_handle *tep, struct tep_record *rec);
+int tep_data_pid(struct tep_handle *tep, struct tep_record *rec);
+int tep_data_preempt_count(struct tep_handle *tep, struct tep_record *rec);
+int tep_data_flags(struct tep_handle *tep, struct tep_record *rec);
+const char *tep_data_comm_from_pid(struct tep_handle *tep, int pid);
 struct tep_cmdline;
-struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
+struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *tep, const char *comm,
                                           struct tep_cmdline *next);
-int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline);
+int tep_cmdline_pid(struct tep_handle *tep, struct tep_cmdline *cmdline);
 
 void tep_print_field(struct trace_seq *s, void *data,
                     struct tep_format_field *field);
@@ -541,10 +543,12 @@ void tep_print_fields(struct trace_seq *s, void *data,
                      int size __maybe_unused, struct tep_event *event);
 void tep_event_info(struct trace_seq *s, struct tep_event *event,
                    struct tep_record *record);
-int tep_strerror(struct tep_handle *pevent, enum tep_errno errnum,
+int tep_strerror(struct tep_handle *tep, enum tep_errno errnum,
                 char *buf, size_t buflen);
 
-struct tep_event **tep_list_events(struct tep_handle *pevent, enum tep_event_sort_type);
+struct tep_event **tep_list_events(struct tep_handle *tep, enum tep_event_sort_type);
+struct tep_event **tep_list_events_copy(struct tep_handle *tep,
+                                       enum tep_event_sort_type);
 struct tep_format_field **tep_event_common_fields(struct tep_event *event);
 struct tep_format_field **tep_event_fields(struct tep_event *event);
 
@@ -552,24 +556,28 @@ enum tep_endian {
         TEP_LITTLE_ENDIAN = 0,
         TEP_BIG_ENDIAN
 };
-int tep_get_cpus(struct tep_handle *pevent);
-void tep_set_cpus(struct tep_handle *pevent, int cpus);
-int tep_get_long_size(struct tep_handle *pevent);
-void tep_set_long_size(struct tep_handle *pevent, int long_size);
-int tep_get_page_size(struct tep_handle *pevent);
-void tep_set_page_size(struct tep_handle *pevent, int _page_size);
-int tep_file_bigendian(struct tep_handle *pevent);
-void tep_set_file_bigendian(struct tep_handle *pevent, enum tep_endian endian);
-int tep_is_host_bigendian(struct tep_handle *pevent);
-void tep_set_host_bigendian(struct tep_handle *pevent, enum tep_endian endian);
-int tep_is_latency_format(struct tep_handle *pevent);
-void tep_set_latency_format(struct tep_handle *pevent, int lat);
-int tep_get_header_page_size(struct tep_handle *pevent);
+int tep_get_cpus(struct tep_handle *tep);
+void tep_set_cpus(struct tep_handle *tep, int cpus);
+int tep_get_long_size(struct tep_handle *tep);
+void tep_set_long_size(struct tep_handle *tep, int long_size);
+int tep_get_page_size(struct tep_handle *tep);
+void tep_set_page_size(struct tep_handle *tep, int _page_size);
+bool tep_is_file_bigendian(struct tep_handle *tep);
+void tep_set_file_bigendian(struct tep_handle *tep, enum tep_endian endian);
+bool tep_is_local_bigendian(struct tep_handle *tep);
+void tep_set_local_bigendian(struct tep_handle *tep, enum tep_endian endian);
+bool tep_is_latency_format(struct tep_handle *tep);
+void tep_set_latency_format(struct tep_handle *tep, int lat);
+int tep_get_header_page_size(struct tep_handle *tep);
+int tep_get_header_timestamp_size(struct tep_handle *tep);
+bool tep_is_old_format(struct tep_handle *tep);
+void tep_set_print_raw(struct tep_handle *tep, int print_raw);
+void tep_set_test_filters(struct tep_handle *tep, int test_filters);
 
 struct tep_handle *tep_alloc(void);
-void tep_free(struct tep_handle *pevent);
-void tep_ref(struct tep_handle *pevent);
-void tep_unref(struct tep_handle *pevent);
+void tep_free(struct tep_handle *tep);
+void tep_ref(struct tep_handle *tep);
+void tep_unref(struct tep_handle *tep);
 int tep_get_ref(struct tep_handle *tep);
 
 /* access to the internal parser */
@@ -581,8 +589,8 @@ const char *tep_get_input_buf(void);
 unsigned long long tep_get_input_buf_ptr(void);
 
 /* for debugging */
-void tep_print_funcs(struct tep_handle *pevent);
-void tep_print_printk(struct tep_handle *pevent);
+void tep_print_funcs(struct tep_handle *tep);
+void tep_print_printk(struct tep_handle *tep);
 
 /* ----------------------- filtering ----------------------- */
 
@@ -709,13 +717,13 @@ struct tep_filter_type {
 #define TEP_FILTER_ERROR_BUFSZ  1024
 
 struct tep_event_filter {
-       struct tep_handle       *pevent;
+       struct tep_handle       *tep;
        int                     filters;
        struct tep_filter_type  *event_filters;
        char                    error_buffer[TEP_FILTER_ERROR_BUFSZ];
 };
 
-struct tep_event_filter *tep_filter_alloc(struct tep_handle *pevent);
+struct tep_event_filter *tep_filter_alloc(struct tep_handle *tep);
 
 /* for backward compatibility */
 #define FILTER_NONE            TEP_ERRNO__NO_FILTER
@@ -723,12 +731,6 @@ struct tep_event_filter *tep_filter_alloc(struct tep_handle *pevent);
 #define FILTER_MISS            TEP_ERRNO__FILTER_MISS
 #define FILTER_MATCH           TEP_ERRNO__FILTER_MATCH
 
-enum tep_filter_trivial_type {
-       TEP_FILTER_TRIVIAL_FALSE,
-       TEP_FILTER_TRIVIAL_TRUE,
-       TEP_FILTER_TRIVIAL_BOTH,
-};
-
 enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter,
                                         const char *filter_str);
 
@@ -743,9 +745,6 @@ int tep_event_filtered(struct tep_event_filter *filter,
 
 void tep_filter_reset(struct tep_event_filter *filter);
 
-int tep_filter_clear_trivial(struct tep_event_filter *filter,
-                            enum tep_filter_trivial_type type);
-
 void tep_filter_free(struct tep_event_filter *filter);
 
 char *tep_filter_make_string(struct tep_event_filter *filter, int event_id);
@@ -753,15 +752,8 @@ char *tep_filter_make_string(struct tep_event_filter *filter, int event_id);
 int tep_filter_remove_event(struct tep_event_filter *filter,
                            int event_id);
 
-int tep_filter_event_has_trivial(struct tep_event_filter *filter,
-                                int event_id,
-                                enum tep_filter_trivial_type type);
-
 int tep_filter_copy(struct tep_event_filter *dest, struct tep_event_filter *source);
 
-int tep_update_trivial(struct tep_event_filter *dest, struct tep_event_filter *source,
-                       enum tep_filter_trivial_type type);
-
 int tep_filter_compare(struct tep_event_filter *filter1, struct tep_event_filter *filter2);
 
 #endif /* _PARSE_EVENTS_H */
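
A minimal lifecycle sketch for the renamed handle API, assuming only the
declarations above (the configuration values are illustrative, not
prescribed by this header):

	struct tep_handle *tep = tep_alloc();	/* takes the initial reference */

	if (!tep)
		return -1;

	tep_set_cpus(tep, 4);		/* CPU count recorded in the trace */
	tep_set_long_size(tep, 8);	/* sizeof(long) on the traced system */
	tep_set_page_size(tep, 4096);	/* ring-buffer page size */

	/* ... parse and print events ... */

	tep_free(tep);			/* drop the reference */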
index e74f16c88398fcc4faefe8a31238e0403ff292e3..8ca28de9337a5314c23745fe9e3b90b46c86873d 100644 (file)
@@ -269,7 +269,7 @@ void tep_print_plugins(struct trace_seq *s,
 }
 
 static void
-load_plugin(struct tep_handle *pevent, const char *path,
+load_plugin(struct tep_handle *tep, const char *path,
            const char *file, void *data)
 {
        struct tep_plugin_list **plugin_list = data;
@@ -316,7 +316,7 @@ load_plugin(struct tep_handle *pevent, const char *path,
        *plugin_list = list;
 
        pr_stat("registering plugin: %s", plugin);
-       func(pevent);
+       func(tep);
        return;
 
  out_free:
@@ -324,9 +324,9 @@ load_plugin(struct tep_handle *pevent, const char *path,
 }
 
 static void
-load_plugins_dir(struct tep_handle *pevent, const char *suffix,
+load_plugins_dir(struct tep_handle *tep, const char *suffix,
                 const char *path,
-                void (*load_plugin)(struct tep_handle *pevent,
+                void (*load_plugin)(struct tep_handle *tep,
                                     const char *path,
                                     const char *name,
                                     void *data),
@@ -359,15 +359,15 @@ load_plugins_dir(struct tep_handle *pevent, const char *suffix,
                if (strcmp(name + (strlen(name) - strlen(suffix)), suffix) != 0)
                        continue;
 
-               load_plugin(pevent, path, name, data);
+               load_plugin(tep, path, name, data);
        }
 
        closedir(dir);
 }
 
 static void
-load_plugins(struct tep_handle *pevent, const char *suffix,
-            void (*load_plugin)(struct tep_handle *pevent,
+load_plugins(struct tep_handle *tep, const char *suffix,
+            void (*load_plugin)(struct tep_handle *tep,
                                 const char *path,
                                 const char *name,
                                 void *data),
@@ -378,7 +378,7 @@ load_plugins(struct tep_handle *pevent, const char *suffix,
        char *envdir;
        int ret;
 
-       if (pevent->flags & TEP_DISABLE_PLUGINS)
+       if (tep->flags & TEP_DISABLE_PLUGINS)
                return;
 
        /*
@@ -386,8 +386,8 @@ load_plugins(struct tep_handle *pevent, const char *suffix,
         * check that first.
         */
 #ifdef PLUGIN_DIR
-       if (!(pevent->flags & TEP_DISABLE_SYS_PLUGINS))
-               load_plugins_dir(pevent, suffix, PLUGIN_DIR,
+       if (!(tep->flags & TEP_DISABLE_SYS_PLUGINS))
+               load_plugins_dir(tep, suffix, PLUGIN_DIR,
                                 load_plugin, data);
 #endif
 
@@ -397,7 +397,7 @@ load_plugins(struct tep_handle *pevent, const char *suffix,
         */
        envdir = getenv("TRACEEVENT_PLUGIN_DIR");
        if (envdir)
-               load_plugins_dir(pevent, suffix, envdir, load_plugin, data);
+               load_plugins_dir(tep, suffix, envdir, load_plugin, data);
 
        /*
         * Now let the home directory override the environment
@@ -413,22 +413,22 @@ load_plugins(struct tep_handle *pevent, const char *suffix,
                return;
        }
 
-       load_plugins_dir(pevent, suffix, path, load_plugin, data);
+       load_plugins_dir(tep, suffix, path, load_plugin, data);
 
        free(path);
 }
 
 struct tep_plugin_list*
-tep_load_plugins(struct tep_handle *pevent)
+tep_load_plugins(struct tep_handle *tep)
 {
        struct tep_plugin_list *list = NULL;
 
-       load_plugins(pevent, ".so", load_plugin, &list);
+       load_plugins(tep, ".so", load_plugin, &list);
        return list;
 }
 
 void
-tep_unload_plugins(struct tep_plugin_list *plugin_list, struct tep_handle *pevent)
+tep_unload_plugins(struct tep_plugin_list *plugin_list, struct tep_handle *tep)
 {
        tep_plugin_unload_func func;
        struct tep_plugin_list *list;
@@ -438,7 +438,7 @@ tep_unload_plugins(struct tep_plugin_list *plugin_list, struct tep_handle *peven
                plugin_list = list->next;
                func = dlsym(list->handle, TEP_PLUGIN_UNLOADER_NAME);
                if (func)
-                       func(pevent);
+                       func(tep);
                dlclose(list->handle);
                free(list->name);
                free(list);
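
A usage sketch of the load/unload pairing above; the tep handle is
assumed to come from tep_alloc() elsewhere:

	struct tep_plugin_list *plugins;

	plugins = tep_load_plugins(tep);	/* dlopen() each matching .so */

	/* ... handlers registered by the plugins are now active ... */

	tep_unload_plugins(plugins, tep);	/* run unloaders, dlclose(), free */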
index af2a1f3b7424141483c514daf62260b3fba08cf7..b887e7437d67423aa48209ff1826f16915f67ba4 100644 (file)
@@ -727,3 +727,52 @@ int kbuffer_start_of_data(struct kbuffer *kbuf)
 {
        return kbuf->start;
 }
+
+/**
+ * kbuffer_raw_get - get raw buffer info
+ * @kbuf:      The kbuffer
+ * @subbuf:    Start of mapped subbuffer
+ * @info:      Info descriptor to fill in
+ *
+ * For debugging. This returns the raw internals of the ring buffer.
+ * Expects to have @info->next set to what it will read.
+ * The type, length and timestamp delta will be filled in, and
+ * @info->next will be updated to the next element.
+ * The @subbuf is used to know if @info->next is past the end of
+ * the data, in which case NULL will be returned.
+ */
+struct kbuffer_raw_info *
+kbuffer_raw_get(struct kbuffer *kbuf, void *subbuf, struct kbuffer_raw_info *info)
+{
+       unsigned long long flags;
+       unsigned long long delta;
+       unsigned int type_len;
+       unsigned int size;
+       int start;
+       int length;
+       void *ptr = info->next;
+
+       if (!kbuf || !subbuf)
+               return NULL;
+
+       if (kbuf->flags & KBUFFER_FL_LONG_8)
+               start = 16;
+       else
+               start = 12;
+
+       flags = read_long(kbuf, subbuf + 8);
+       size = (unsigned int)flags & COMMIT_MASK;
+
+       if (ptr < subbuf || ptr >= subbuf + start + size)
+               return NULL;
+
+       type_len = translate_data(kbuf, ptr, &ptr, &delta, &length);
+
+       info->next = ptr + length;
+
+       info->type = type_len;
+       info->delta = delta;
+       info->length = length;
+
+       return info;
+}
index 03dce757553f14b123f7f678b42550722e5c9656..ed4d697fc137861d9b98a23daef9f19d5e58da59 100644 (file)
@@ -65,4 +65,17 @@ int kbuffer_subbuffer_size(struct kbuffer *kbuf);
 void kbuffer_set_old_format(struct kbuffer *kbuf);
 int kbuffer_start_of_data(struct kbuffer *kbuf);
 
+/* Debugging */
+
+struct kbuffer_raw_info {
+       int                     type;
+       int                     length;
+       unsigned long long      delta;
+       void                    *next;
+};
+
+/* Read raw data */
+struct kbuffer_raw_info *kbuffer_raw_get(struct kbuffer *kbuf, void *subbuf,
+                                        struct kbuffer_raw_info *info);
+
 #endif /* _K_BUFFER_H */
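
A sketch of walking one mapped sub-buffer with the new debugging
interface; seeding info.next via kbuffer_start_of_data() is an
assumption about where the first element lives, not something this
header prescribes:

	struct kbuffer_raw_info info;

	info.next = subbuf + kbuffer_start_of_data(kbuf);

	while (kbuffer_raw_get(kbuf, subbuf, &info)) {
		printf("type=%d len=%d delta=%llu\n",
		       info.type, info.length, info.delta);
		/* info.next now points at the following element */
	}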
index cb5ce66dab6e05abff3912179b6975f358c1329b..552592d153fb813af9ae76d9453bd5da73a3c60b 100644 (file)
@@ -154,7 +154,7 @@ add_filter_type(struct tep_event_filter *filter, int id)
 
        filter_type = &filter->event_filters[i];
        filter_type->event_id = id;
-       filter_type->event = tep_find_event(filter->pevent, id);
+       filter_type->event = tep_find_event(filter->tep, id);
        filter_type->filter = NULL;
 
        filter->filters++;
@@ -164,9 +164,9 @@ add_filter_type(struct tep_event_filter *filter, int id)
 
 /**
  * tep_filter_alloc - create a new event filter
- * @pevent: The pevent that this filter is associated with
+ * @tep: The tep handle that this filter is associated with
  */
-struct tep_event_filter *tep_filter_alloc(struct tep_handle *pevent)
+struct tep_event_filter *tep_filter_alloc(struct tep_handle *tep)
 {
        struct tep_event_filter *filter;
 
@@ -175,8 +175,8 @@ struct tep_event_filter *tep_filter_alloc(struct tep_handle *pevent)
                return NULL;
 
        memset(filter, 0, sizeof(*filter));
-       filter->pevent = pevent;
-       tep_ref(pevent);
+       filter->tep = tep;
+       tep_ref(tep);
 
        return filter;
 }
@@ -256,7 +256,7 @@ static int event_match(struct tep_event *event,
 }
 
 static enum tep_errno
-find_event(struct tep_handle *pevent, struct event_list **events,
+find_event(struct tep_handle *tep, struct event_list **events,
           char *sys_name, char *event_name)
 {
        struct tep_event *event;
@@ -299,8 +299,8 @@ find_event(struct tep_handle *pevent, struct event_list **events,
                }
        }
 
-       for (i = 0; i < pevent->nr_events; i++) {
-               event = pevent->events[i];
+       for (i = 0; i < tep->nr_events; i++) {
+               event = tep->events[i];
                if (event_match(event, sys_name ? &sreg : NULL, &ereg)) {
                        match = 1;
                        if (add_event(events, event) < 0) {
@@ -1257,7 +1257,7 @@ static void filter_init_error_buf(struct tep_event_filter *filter)
 enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter,
                                         const char *filter_str)
 {
-       struct tep_handle *pevent = filter->pevent;
+       struct tep_handle *tep = filter->tep;
        struct event_list *event;
        struct event_list *events = NULL;
        const char *filter_start;
@@ -1313,7 +1313,7 @@ enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter,
                }
 
                /* Find this event */
-               ret = find_event(pevent, &events, strim(sys_name), strim(event_name));
+               ret = find_event(tep, &events, strim(sys_name), strim(event_name));
                if (ret < 0) {
                        free_events(events);
                        free(this_event);
@@ -1334,7 +1334,7 @@ enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter,
                if (ret < 0)
                        rtn = ret;
 
-               if (ret >= 0 && pevent->test_filters) {
+               if (ret >= 0 && tep->test_filters) {
                        char *test;
                        test = tep_filter_make_string(filter, event->event->id);
                        if (test) {
@@ -1346,9 +1346,6 @@ enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter,
 
        free_events(events);
 
-       if (rtn >= 0 && pevent->test_filters)
-               exit(0);
-
        return rtn;
 }
 
@@ -1380,7 +1377,7 @@ int tep_filter_strerror(struct tep_event_filter *filter, enum tep_errno err,
                return 0;
        }
 
-       return tep_strerror(filter->pevent, err, buf, buflen);
+       return tep_strerror(filter->tep, err, buf, buflen);
 }
 
 /**
@@ -1443,7 +1440,7 @@ void tep_filter_reset(struct tep_event_filter *filter)
 
 void tep_filter_free(struct tep_event_filter *filter)
 {
-       tep_unref(filter->pevent);
+       tep_unref(filter->tep);
 
        tep_filter_reset(filter);
 
@@ -1462,10 +1459,10 @@ static int copy_filter_type(struct tep_event_filter *filter,
        const char *name;
        char *str;
 
-       /* Can't assume that the pevent's are the same */
+       /* Can't assume that the tep handles are the same */
        sys = filter_type->event->system;
        name = filter_type->event->name;
-       event = tep_find_event_by_name(filter->pevent, sys, name);
+       event = tep_find_event_by_name(filter->tep, sys, name);
        if (!event)
                return -1;
 
@@ -1522,167 +1519,6 @@ int tep_filter_copy(struct tep_event_filter *dest, struct tep_event_filter *sour
        return ret;
 }
 
-
-/**
- * tep_update_trivial - update the trivial filters with the given filter
- * @dest - the filter to update
- * @source - the filter as the source of the update
- * @type - the type of trivial filter to update.
- *
- * Scan dest for trivial events matching @type to replace with the source.
- *
- * Returns 0 on success and -1 if there was a problem updating, but
- *   events may have still been updated on error.
- */
-int tep_update_trivial(struct tep_event_filter *dest, struct tep_event_filter *source,
-                      enum tep_filter_trivial_type type)
-{
-       struct tep_handle *src_pevent;
-       struct tep_handle *dest_pevent;
-       struct tep_event *event;
-       struct tep_filter_type *filter_type;
-       struct tep_filter_arg *arg;
-       char *str;
-       int i;
-
-       src_pevent = source->pevent;
-       dest_pevent = dest->pevent;
-
-       /* Do nothing if either of the filters has nothing to filter */
-       if (!dest->filters || !source->filters)
-               return 0;
-
-       for (i = 0; i < dest->filters; i++) {
-               filter_type = &dest->event_filters[i];
-               arg = filter_type->filter;
-               if (arg->type != TEP_FILTER_ARG_BOOLEAN)
-                       continue;
-               if ((arg->boolean.value && type == TEP_FILTER_TRIVIAL_FALSE) ||
-                   (!arg->boolean.value && type == TEP_FILTER_TRIVIAL_TRUE))
-                       continue;
-
-               event = filter_type->event;
-
-               if (src_pevent != dest_pevent) {
-                       /* do a look up */
-                       event = tep_find_event_by_name(src_pevent,
-                                                      event->system,
-                                                      event->name);
-                       if (!event)
-                               return -1;
-               }
-
-               str = tep_filter_make_string(source, event->id);
-               if (!str)
-                       continue;
-
-               /* Don't bother if the filter is trivial too */
-               if (strcmp(str, "TRUE") != 0 && strcmp(str, "FALSE") != 0)
-                       filter_event(dest, event, str, NULL);
-               free(str);
-       }
-       return 0;
-}
-
-/**
- * tep_filter_clear_trivial - clear TRUE and FALSE filters
- * @filter: the filter to remove trivial filters from
- * @type: remove only true, false, or both
- *
- * Removes filters that only contain a TRUE or FALES boolean arg.
- *
- * Returns 0 on success and -1 if there was a problem.
- */
-int tep_filter_clear_trivial(struct tep_event_filter *filter,
-                            enum tep_filter_trivial_type type)
-{
-       struct tep_filter_type *filter_type;
-       int count = 0;
-       int *ids = NULL;
-       int i;
-
-       if (!filter->filters)
-               return 0;
-
-       /*
-        * Two steps, first get all ids with trivial filters.
-        *  then remove those ids.
-        */
-       for (i = 0; i < filter->filters; i++) {
-               int *new_ids;
-
-               filter_type = &filter->event_filters[i];
-               if (filter_type->filter->type != TEP_FILTER_ARG_BOOLEAN)
-                       continue;
-               switch (type) {
-               case TEP_FILTER_TRIVIAL_FALSE:
-                       if (filter_type->filter->boolean.value)
-                               continue;
-                       break;
-               case TEP_FILTER_TRIVIAL_TRUE:
-                       if (!filter_type->filter->boolean.value)
-                               continue;
-               default:
-                       break;
-               }
-
-               new_ids = realloc(ids, sizeof(*ids) * (count + 1));
-               if (!new_ids) {
-                       free(ids);
-                       return -1;
-               }
-
-               ids = new_ids;
-               ids[count++] = filter_type->event_id;
-       }
-
-       if (!count)
-               return 0;
-
-       for (i = 0; i < count; i++)
-               tep_filter_remove_event(filter, ids[i]);
-
-       free(ids);
-       return 0;
-}
-
-/**
- * tep_filter_event_has_trivial - return true event contains trivial filter
- * @filter: the filter with the information
- * @event_id: the id of the event to test
- * @type: trivial type to test for (TRUE, FALSE, EITHER)
- *
- * Returns 1 if the event contains a matching trivial type
- *  otherwise 0.
- */
-int tep_filter_event_has_trivial(struct tep_event_filter *filter,
-                                int event_id,
-                                enum tep_filter_trivial_type type)
-{
-       struct tep_filter_type *filter_type;
-
-       if (!filter->filters)
-               return 0;
-
-       filter_type = find_filter_type(filter, event_id);
-
-       if (!filter_type)
-               return 0;
-
-       if (filter_type->filter->type != TEP_FILTER_ARG_BOOLEAN)
-               return 0;
-
-       switch (type) {
-       case TEP_FILTER_TRIVIAL_FALSE:
-               return !filter_type->filter->boolean.value;
-
-       case TEP_FILTER_TRIVIAL_TRUE:
-               return filter_type->filter->boolean.value;
-       default:
-               return 1;
-       }
-}
-
 static int test_filter(struct tep_event *event, struct tep_filter_arg *arg,
                       struct tep_record *record, enum tep_errno *err);
 
@@ -1692,8 +1528,8 @@ get_comm(struct tep_event *event, struct tep_record *record)
        const char *comm;
        int pid;
 
-       pid = tep_data_pid(event->pevent, record);
-       comm = tep_data_comm_from_pid(event->pevent, pid);
+       pid = tep_data_pid(event->tep, record);
+       comm = tep_data_comm_from_pid(event->tep, pid);
        return comm;
 }
 
@@ -1861,7 +1697,7 @@ static int test_num(struct tep_event *event, struct tep_filter_arg *arg,
 static const char *get_field_str(struct tep_filter_arg *arg, struct tep_record *record)
 {
        struct tep_event *event;
-       struct tep_handle *pevent;
+       struct tep_handle *tep;
        unsigned long long addr;
        const char *val = NULL;
        unsigned int size;
@@ -1891,12 +1727,12 @@ static const char *get_field_str(struct tep_filter_arg *arg, struct tep_record *
 
        } else {
                event = arg->str.field->event;
-               pevent = event->pevent;
+               tep = event->tep;
                addr = get_value(event, arg->str.field, record);
 
                if (arg->str.field->flags & (TEP_FIELD_IS_POINTER | TEP_FIELD_IS_LONG))
                        /* convert to a kernel symbol */
-                       val = tep_find_function(pevent, addr);
+                       val = tep_find_function(tep, addr);
 
                if (val == NULL) {
                        /* just use the hex of the string name */
@@ -2036,7 +1872,7 @@ int tep_event_filtered(struct tep_event_filter *filter, int event_id)
 enum tep_errno tep_filter_match(struct tep_event_filter *filter,
                                struct tep_record *record)
 {
-       struct tep_handle *pevent = filter->pevent;
+       struct tep_handle *tep = filter->tep;
        struct tep_filter_type *filter_type;
        int event_id;
        int ret;
@@ -2047,7 +1883,7 @@ enum tep_errno tep_filter_match(struct tep_event_filter *filter,
        if (!filter->filters)
                return TEP_ERRNO__NO_FILTER;
 
-       event_id = tep_data_type(pevent, record);
+       event_id = tep_data_type(tep, record);
 
        filter_type = find_filter_type(filter, event_id);
        if (!filter_type)
@@ -2409,14 +2245,6 @@ int tep_filter_compare(struct tep_event_filter *filter1, struct tep_event_filter
                        break;
                if (filter_type1->filter->type != filter_type2->filter->type)
                        break;
-               switch (filter_type1->filter->type) {
-               case TEP_FILTER_TRIVIAL_FALSE:
-               case TEP_FILTER_TRIVIAL_TRUE:
-                       /* trivial types just need the type compared */
-                       continue;
-               default:
-                       break;
-               }
                /* The best way to compare complex filters is with strings */
                str1 = arg_to_str(filter1, filter_type1->filter);
                str2 = arg_to_str(filter2, filter_type2->filter);
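
With the trivial-filter helpers gone, the surviving API is the plain
alloc/add/match/free cycle. A hedged sketch; the filter string and the
record variable are illustrative only:

	struct tep_event_filter *filter = tep_filter_alloc(tep);
	enum tep_errno err;

	err = tep_filter_add_filter_str(filter,
					"sched/sched_switch:next_pid == 0");
	if (err < 0) {
		char buf[TEP_FILTER_ERROR_BUFSZ];

		tep_filter_strerror(filter, err, buf, sizeof(buf));
		/* report the message in buf */
	}

	if (tep_filter_match(filter, record) == TEP_ERRNO__FILTER_MATCH)
		; /* the record passes the filter */

	tep_filter_free(filter);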
index 77e4ec6402dd3fef5e7832cbe1afe41f83db3906..e99867111387f38f511b5651609ac79ccc390e43 100644 (file)
@@ -14,7 +14,7 @@
 void __vwarning(const char *fmt, va_list ap)
 {
        if (errno)
-               perror("trace-cmd");
+               perror("libtraceevent");
        errno = 0;
 
        fprintf(stderr, "  ");
index a51b366f47dad91500f540a3f2198013943556ae..3d43b56a6c98436aefc37144e0b109a6125d03d7 100644 (file)
@@ -25,9 +25,9 @@ process___le16_to_cpup(struct trace_seq *s, unsigned long long *args)
        return val ? (long long) le16toh(*val) : 0;
 }
 
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
 {
-       tep_register_print_function(pevent,
+       tep_register_print_function(tep,
                                    process___le16_to_cpup,
                                    TEP_FUNC_ARG_INT,
                                    "__le16_to_cpup",
@@ -36,8 +36,8 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
        return 0;
 }
 
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
 {
-       tep_unregister_print_function(pevent, process___le16_to_cpup,
+       tep_unregister_print_function(tep, process___le16_to_cpup,
                                      "__le16_to_cpup");
 }
index a73eca34a8f963e75999128f6335d7e812dd0bf9..7770fcb78e0fbcc13eb101b9a578f34e361deec4 100644 (file)
@@ -126,7 +126,7 @@ static int add_and_get_index(const char *parent, const char *child, int cpu)
 static int function_handler(struct trace_seq *s, struct tep_record *record,
                            struct tep_event *event, void *context)
 {
-       struct tep_handle *pevent = event->pevent;
+       struct tep_handle *tep = event->tep;
        unsigned long long function;
        unsigned long long pfunction;
        const char *func;
@@ -136,12 +136,12 @@ static int function_handler(struct trace_seq *s, struct tep_record *record,
        if (tep_get_field_val(s, event, "ip", record, &function, 1))
                return trace_seq_putc(s, '!');
 
-       func = tep_find_function(pevent, function);
+       func = tep_find_function(tep, function);
 
        if (tep_get_field_val(s, event, "parent_ip", record, &pfunction, 1))
                return trace_seq_putc(s, '!');
 
-       parent = tep_find_function(pevent, pfunction);
+       parent = tep_find_function(tep, pfunction);
 
        if (parent && ftrace_indent->set)
                index = add_and_get_index(parent, func, record->cpu);
@@ -164,9 +164,9 @@ static int function_handler(struct trace_seq *s, struct tep_record *record,
        return 0;
 }
 
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
 {
-       tep_register_event_handler(pevent, -1, "ftrace", "function",
+       tep_register_event_handler(tep, -1, "ftrace", "function",
                                   function_handler, NULL);
 
        tep_plugin_add_options("ftrace", plugin_options);
@@ -174,11 +174,11 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
        return 0;
 }
 
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
 {
        int i, x;
 
-       tep_unregister_event_handler(pevent, -1, "ftrace", "function",
+       tep_unregister_event_handler(tep, -1, "ftrace", "function",
                                     function_handler, NULL);
 
        for (i = 0; i <= cpus; i++) {
index 5db5e401275ff3fc2a54182b8abe9434c40be7a3..bb434e0ed03abc93d5fb26f37c2269fc5f457761 100644 (file)
@@ -67,23 +67,23 @@ static int timer_start_handler(struct trace_seq *s,
        return 0;
 }
 
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
 {
-       tep_register_event_handler(pevent, -1,
+       tep_register_event_handler(tep, -1,
                                   "timer", "hrtimer_expire_entry",
                                   timer_expire_handler, NULL);
 
-       tep_register_event_handler(pevent, -1, "timer", "hrtimer_start",
+       tep_register_event_handler(tep, -1, "timer", "hrtimer_start",
                                   timer_start_handler, NULL);
        return 0;
 }
 
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
 {
-       tep_unregister_event_handler(pevent, -1,
+       tep_unregister_event_handler(tep, -1,
                                     "timer", "hrtimer_expire_entry",
                                     timer_expire_handler, NULL);
 
-       tep_unregister_event_handler(pevent, -1, "timer", "hrtimer_start",
+       tep_unregister_event_handler(tep, -1, "timer", "hrtimer_start",
                                     timer_start_handler, NULL);
 }
index a5e34135dd6a26dda274461bd2df14a5a4c06399..04fc125f38cb3ef8c35dc293a96a06b07b24a2d8 100644 (file)
@@ -48,16 +48,16 @@ process_jiffies_to_msecs(struct trace_seq *s, unsigned long long *args)
        return jiffies;
 }
 
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
 {
-       tep_register_print_function(pevent,
+       tep_register_print_function(tep,
                                    process_jbd2_dev_to_name,
                                    TEP_FUNC_ARG_STRING,
                                    "jbd2_dev_to_name",
                                    TEP_FUNC_ARG_INT,
                                    TEP_FUNC_ARG_VOID);
 
-       tep_register_print_function(pevent,
+       tep_register_print_function(tep,
                                    process_jiffies_to_msecs,
                                    TEP_FUNC_ARG_LONG,
                                    "jiffies_to_msecs",
@@ -66,11 +66,11 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
        return 0;
 }
 
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
 {
-       tep_unregister_print_function(pevent, process_jbd2_dev_to_name,
+       tep_unregister_print_function(tep, process_jbd2_dev_to_name,
                                      "jbd2_dev_to_name");
 
-       tep_unregister_print_function(pevent, process_jiffies_to_msecs,
+       tep_unregister_print_function(tep, process_jiffies_to_msecs,
                                      "jiffies_to_msecs");
 }
index 0e3c601f9ed19313ef5c2188c63a16319533a77c..edaec5d962c3df00b47f9fdad5c760549903790a 100644 (file)
@@ -39,57 +39,57 @@ static int call_site_handler(struct trace_seq *s, struct tep_record *record,
        if (tep_read_number_field(field, data, &val))
                return 1;
 
-       func = tep_find_function(event->pevent, val);
+       func = tep_find_function(event->tep, val);
        if (!func)
                return 1;
 
-       addr = tep_find_function_address(event->pevent, val);
+       addr = tep_find_function_address(event->tep, val);
 
        trace_seq_printf(s, "(%s+0x%x) ", func, (int)(val - addr));
        return 1;
 }
 
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
 {
-       tep_register_event_handler(pevent, -1, "kmem", "kfree",
+       tep_register_event_handler(tep, -1, "kmem", "kfree",
                                   call_site_handler, NULL);
 
-       tep_register_event_handler(pevent, -1, "kmem", "kmalloc",
+       tep_register_event_handler(tep, -1, "kmem", "kmalloc",
                                   call_site_handler, NULL);
 
-       tep_register_event_handler(pevent, -1, "kmem", "kmalloc_node",
+       tep_register_event_handler(tep, -1, "kmem", "kmalloc_node",
                                   call_site_handler, NULL);
 
-       tep_register_event_handler(pevent, -1, "kmem", "kmem_cache_alloc",
+       tep_register_event_handler(tep, -1, "kmem", "kmem_cache_alloc",
                                   call_site_handler, NULL);
 
-       tep_register_event_handler(pevent, -1, "kmem",
+       tep_register_event_handler(tep, -1, "kmem",
                                   "kmem_cache_alloc_node",
                                   call_site_handler, NULL);
 
-       tep_register_event_handler(pevent, -1, "kmem", "kmem_cache_free",
+       tep_register_event_handler(tep, -1, "kmem", "kmem_cache_free",
                                   call_site_handler, NULL);
        return 0;
 }
 
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
 {
-       tep_unregister_event_handler(pevent, -1, "kmem", "kfree",
+       tep_unregister_event_handler(tep, -1, "kmem", "kfree",
                                     call_site_handler, NULL);
 
-       tep_unregister_event_handler(pevent, -1, "kmem", "kmalloc",
+       tep_unregister_event_handler(tep, -1, "kmem", "kmalloc",
                                     call_site_handler, NULL);
 
-       tep_unregister_event_handler(pevent, -1, "kmem", "kmalloc_node",
+       tep_unregister_event_handler(tep, -1, "kmem", "kmalloc_node",
                                     call_site_handler, NULL);
 
-       tep_unregister_event_handler(pevent, -1, "kmem", "kmem_cache_alloc",
+       tep_unregister_event_handler(tep, -1, "kmem", "kmem_cache_alloc",
                                     call_site_handler, NULL);
 
-       tep_unregister_event_handler(pevent, -1, "kmem",
+       tep_unregister_event_handler(tep, -1, "kmem",
                                     "kmem_cache_alloc_node",
                                     call_site_handler, NULL);
 
-       tep_unregister_event_handler(pevent, -1, "kmem", "kmem_cache_free",
+       tep_unregister_event_handler(tep, -1, "kmem", "kmem_cache_free",
                                     call_site_handler, NULL);
 }
index 64b9c25a1fd3fcf25c6aed9bdf8f0ddcee40995a..c8e623065a7e4bc515d127660ef2bde07e883da5 100644 (file)
@@ -389,8 +389,8 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record,
         * We can only use the structure if file is of the same
         * endianness.
         */
-       if (tep_file_bigendian(event->pevent) ==
-           tep_is_host_bigendian(event->pevent)) {
+       if (tep_is_file_bigendian(event->tep) ==
+           tep_is_local_bigendian(event->tep)) {
 
                trace_seq_printf(s, "%u q%u%s %s%s %spae %snxe %swp%s%s%s",
                                 role.level,
@@ -445,40 +445,40 @@ process_is_writable_pte(struct trace_seq *s, unsigned long long *args)
        return pte & PT_WRITABLE_MASK;
 }
 
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
 {
        init_disassembler();
 
-       tep_register_event_handler(pevent, -1, "kvm", "kvm_exit",
+       tep_register_event_handler(tep, -1, "kvm", "kvm_exit",
                                   kvm_exit_handler, NULL);
 
-       tep_register_event_handler(pevent, -1, "kvm", "kvm_emulate_insn",
+       tep_register_event_handler(tep, -1, "kvm", "kvm_emulate_insn",
                                   kvm_emulate_insn_handler, NULL);
 
-       tep_register_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit",
+       tep_register_event_handler(tep, -1, "kvm", "kvm_nested_vmexit",
                                   kvm_nested_vmexit_handler, NULL);
 
-       tep_register_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit_inject",
+       tep_register_event_handler(tep, -1, "kvm", "kvm_nested_vmexit_inject",
                                   kvm_nested_vmexit_inject_handler, NULL);
 
-       tep_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page",
+       tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_get_page",
                                   kvm_mmu_get_page_handler, NULL);
 
-       tep_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_sync_page",
+       tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_sync_page",
                                   kvm_mmu_print_role, NULL);
 
-       tep_register_event_handler(pevent, -1,
+       tep_register_event_handler(tep, -1,
                                   "kvmmmu", "kvm_mmu_unsync_page",
                                   kvm_mmu_print_role, NULL);
 
-       tep_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_zap_page",
+       tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_zap_page",
                                   kvm_mmu_print_role, NULL);
 
-       tep_register_event_handler(pevent, -1, "kvmmmu",
+       tep_register_event_handler(tep, -1, "kvmmmu",
                        "kvm_mmu_prepare_zap_page", kvm_mmu_print_role,
                        NULL);
 
-       tep_register_print_function(pevent,
+       tep_register_print_function(tep,
                                    process_is_writable_pte,
                                    TEP_FUNC_ARG_INT,
                                    "is_writable_pte",
@@ -487,37 +487,37 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
        return 0;
 }
 
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
 {
-       tep_unregister_event_handler(pevent, -1, "kvm", "kvm_exit",
+       tep_unregister_event_handler(tep, -1, "kvm", "kvm_exit",
                                     kvm_exit_handler, NULL);
 
-       tep_unregister_event_handler(pevent, -1, "kvm", "kvm_emulate_insn",
+       tep_unregister_event_handler(tep, -1, "kvm", "kvm_emulate_insn",
                                     kvm_emulate_insn_handler, NULL);
 
-       tep_unregister_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit",
+       tep_unregister_event_handler(tep, -1, "kvm", "kvm_nested_vmexit",
                                     kvm_nested_vmexit_handler, NULL);
 
-       tep_unregister_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit_inject",
+       tep_unregister_event_handler(tep, -1, "kvm", "kvm_nested_vmexit_inject",
                                     kvm_nested_vmexit_inject_handler, NULL);
 
-       tep_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page",
+       tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_get_page",
                                     kvm_mmu_get_page_handler, NULL);
 
-       tep_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_sync_page",
+       tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_sync_page",
                                     kvm_mmu_print_role, NULL);
 
-       tep_unregister_event_handler(pevent, -1,
+       tep_unregister_event_handler(tep, -1,
                                     "kvmmmu", "kvm_mmu_unsync_page",
                                     kvm_mmu_print_role, NULL);
 
-       tep_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_zap_page",
+       tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_zap_page",
                                     kvm_mmu_print_role, NULL);
 
-       tep_unregister_event_handler(pevent, -1, "kvmmmu",
+       tep_unregister_event_handler(tep, -1, "kvmmmu",
                        "kvm_mmu_prepare_zap_page", kvm_mmu_print_role,
                        NULL);
 
-       tep_unregister_print_function(pevent, process_is_writable_pte,
+       tep_unregister_print_function(tep, process_is_writable_pte,
                                      "is_writable_pte");
 }
index e38b9477aad2204da812b0a1b5dc507ce721f133..884303c26b5cd1b7318fedfbb56221df0dacd8a3 100644 (file)
@@ -87,17 +87,17 @@ static int drv_bss_info_changed(struct trace_seq *s,
        return 0;
 }
 
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
 {
-       tep_register_event_handler(pevent, -1, "mac80211",
+       tep_register_event_handler(tep, -1, "mac80211",
                                   "drv_bss_info_changed",
                                   drv_bss_info_changed, NULL);
        return 0;
 }
 
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
 {
-       tep_unregister_event_handler(pevent, -1, "mac80211",
+       tep_unregister_event_handler(tep, -1, "mac80211",
                                     "drv_bss_info_changed",
                                     drv_bss_info_changed, NULL);
 }
index 834c9e378ff85501db70cf387720fc9efe877a47..957389a0ff7ada5d7fb192083dba38bf742ce2c2 100644 (file)
@@ -62,7 +62,7 @@ static void write_and_save_comm(struct tep_format_field *field,
        comm = &s->buffer[len];
 
        /* Help out the comm to ids. This will handle dups */
-       tep_register_comm(field->event->pevent, comm, pid);
+       tep_register_comm(field->event->tep, comm, pid);
 }
 
 static int sched_wakeup_handler(struct trace_seq *s,
@@ -135,27 +135,27 @@ static int sched_switch_handler(struct trace_seq *s,
        return 0;
 }
 
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
 {
-       tep_register_event_handler(pevent, -1, "sched", "sched_switch",
+       tep_register_event_handler(tep, -1, "sched", "sched_switch",
                                   sched_switch_handler, NULL);
 
-       tep_register_event_handler(pevent, -1, "sched", "sched_wakeup",
+       tep_register_event_handler(tep, -1, "sched", "sched_wakeup",
                                   sched_wakeup_handler, NULL);
 
-       tep_register_event_handler(pevent, -1, "sched", "sched_wakeup_new",
+       tep_register_event_handler(tep, -1, "sched", "sched_wakeup_new",
                                   sched_wakeup_handler, NULL);
        return 0;
 }
 
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
 {
-       tep_unregister_event_handler(pevent, -1, "sched", "sched_switch",
+       tep_unregister_event_handler(tep, -1, "sched", "sched_switch",
                                     sched_switch_handler, NULL);
 
-       tep_unregister_event_handler(pevent, -1, "sched", "sched_wakeup",
+       tep_unregister_event_handler(tep, -1, "sched", "sched_wakeup",
                                     sched_wakeup_handler, NULL);
 
-       tep_unregister_event_handler(pevent, -1, "sched", "sched_wakeup_new",
+       tep_unregister_event_handler(tep, -1, "sched", "sched_wakeup_new",
                                     sched_wakeup_handler, NULL);
 }
index 4eba25cc143187d1e22896602c1dc0aae6e7cd99..5d0387a4b65a0951294bb2613cebd78149d022f0 100644 (file)
@@ -414,9 +414,9 @@ unsigned long long process_scsi_trace_parse_cdb(struct trace_seq *s,
        return 0;
 }
 
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
 {
-       tep_register_print_function(pevent,
+       tep_register_print_function(tep,
                                    process_scsi_trace_parse_cdb,
                                    TEP_FUNC_ARG_STRING,
                                    "scsi_trace_parse_cdb",
@@ -427,8 +427,8 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
        return 0;
 }
 
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
 {
-       tep_unregister_print_function(pevent, process_scsi_trace_parse_cdb,
+       tep_unregister_print_function(tep, process_scsi_trace_parse_cdb,
                                      "scsi_trace_parse_cdb");
 }
index bc0496e4c296f9301fe22e5b9dae15606300b812..993b208d0323f8745e6360895fdf3befc30ac5cc 100644 (file)
@@ -120,9 +120,9 @@ unsigned long long process_xen_hypercall_name(struct trace_seq *s,
        return 0;
 }
 
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
 {
-       tep_register_print_function(pevent,
+       tep_register_print_function(tep,
                                    process_xen_hypercall_name,
                                    TEP_FUNC_ARG_STRING,
                                    "xen_hypercall_name",
@@ -131,8 +131,8 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
        return 0;
 }
 
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
 {
-       tep_unregister_print_function(pevent, process_xen_hypercall_name,
+       tep_unregister_print_function(tep, process_xen_hypercall_name,
                                      "xen_hypercall_name");
 }
index 35bff92cc7737d81b073456cf78fd6f5e6e087d3..68caa9a976d0c2e468f07bbe4871e809b21751c7 100644 (file)
@@ -27,7 +27,7 @@ Explanation of the Linux-Kernel Memory Consistency Model
   19. AND THEN THERE WAS ALPHA
   20. THE HAPPENS-BEFORE RELATION: hb
   21. THE PROPAGATES-BEFORE RELATION: pb
-  22. RCU RELATIONS: rcu-link, gp, rscs, rcu-fence, and rb
+  22. RCU RELATIONS: rcu-link, rcu-gp, rcu-rscsi, rcu-fence, and rb
   23. LOCKING
   24. ODDS AND ENDS
 
@@ -1430,8 +1430,8 @@ they execute means that it cannot have cycles.  This requirement is
 the content of the LKMM's "propagation" axiom.
 
 
-RCU RELATIONS: rcu-link, gp, rscs, rcu-fence, and rb
-----------------------------------------------------
+RCU RELATIONS: rcu-link, rcu-gp, rcu-rscsi, rcu-fence, and rb
+-------------------------------------------------------------
 
 RCU (Read-Copy-Update) is a powerful synchronization mechanism.  It
 rests on two concepts: grace periods and read-side critical sections.
@@ -1446,17 +1446,19 @@ As far as memory models are concerned, RCU's main feature is its
 Grace-Period Guarantee, which states that a critical section can never
 span a full grace period.  In more detail, the Guarantee says:
 
-       If a critical section starts before a grace period then it
-       must end before the grace period does.  In addition, every
-       store that propagates to the critical section's CPU before the
-       end of the critical section must propagate to every CPU before
-       the end of the grace period.
+       For any critical section C and any grace period G, at least
+       one of the following statements must hold:
 
-       If a critical section ends after a grace period ends then it
-       must start after the grace period does.  In addition, every
-       store that propagates to the grace period's CPU before the
-       start of the grace period must propagate to every CPU before
-       the start of the critical section.
+(1)    C ends before G does, and in addition, every store that
+       propagates to C's CPU before the end of C must propagate to
+       every CPU before G ends.
+
+(2)    G starts before C does, and in addition, every store that
+       propagates to G's CPU before the start of G must propagate
+       to every CPU before C starts.
+
+In particular, it is not possible for a critical section to both start
+before and end after a grace period.
 
 Here is a simple example of RCU in action:
 
@@ -1483,10 +1485,11 @@ The Grace Period Guarantee tells us that when this code runs, it will
 never end with r1 = 1 and r2 = 0.  The reasoning is as follows.  r1 = 1
 means that P0's store to x propagated to P1 before P1 called
 synchronize_rcu(), so P0's critical section must have started before
-P1's grace period.  On the other hand, r2 = 0 means that P0's store to
-y, which occurs before the end of the critical section, did not
-propagate to P1 before the end of the grace period, violating the
-Guarantee.
+P1's grace period, contrary to part (2) of the Guarantee.  On the
+other hand, r2 = 0 means that P0's store to y, which occurs before the
+end of the critical section, did not propagate to P1 before the end of
+the grace period, contrary to part (1).  Together the results violate
+the Guarantee.
 
 In the kernel's implementations of RCU, the requirements for stores
 to propagate to every CPU are fulfilled by placing strong fences at
@@ -1504,11 +1507,11 @@ before" or "ends after" a grace period?  Some aspects of the meaning
 are pretty obvious, as in the example above, but the details aren't
 entirely clear.  The LKMM formalizes this notion by means of the
 rcu-link relation.  rcu-link encompasses a very general notion of
-"before": Among other things, X ->rcu-link Z includes cases where X
-happens-before or is equal to some event Y which is equal to or comes
-before Z in the coherence order.  When Y = Z this says that X ->rfe Z
-implies X ->rcu-link Z.  In addition, when Y = X it says that X ->fr Z
-and X ->co Z each imply X ->rcu-link Z.
+"before": If E and F are RCU fence events (i.e., rcu_read_lock(),
+rcu_read_unlock(), or synchronize_rcu()) then among other things,
+E ->rcu-link F includes cases where E is po-before some memory-access
+event X, F is po-after some memory-access event Y, and we have any of
+X ->rfe Y, X ->co Y, or X ->fr Y.
 
 The formal definition of the rcu-link relation is more than a little
 obscure, and we won't give it here.  It is closely related to the pb
@@ -1516,171 +1519,173 @@ relation, and the details don't matter unless you want to comb through
 a somewhat lengthy formal proof.  Pretty much all you need to know
 about rcu-link is the information in the preceding paragraph.
 
-The LKMM also defines the gp and rscs relations.  They bring grace
-periods and read-side critical sections into the picture, in the
+The LKMM also defines the rcu-gp and rcu-rscsi relations.  They bring
+grace periods and read-side critical sections into the picture, in the
 following way:
 
-       E ->gp F means there is a synchronize_rcu() fence event S such
-       that E ->po S and either S ->po F or S = F.  In simple terms,
-       there is a grace period po-between E and F.
+       E ->rcu-gp F means that E and F are in fact the same event,
+       and that event is a synchronize_rcu() fence (i.e., a grace
+       period).
 
-       E ->rscs F means there is a critical section delimited by an
-       rcu_read_lock() fence L and an rcu_read_unlock() fence U, such
-       that E ->po U and either L ->po F or L = F.  You can think of
-       this as saying that E and F are in the same critical section
-       (in fact, it also allows E to be po-before the start of the
-       critical section and F to be po-after the end).
+       E ->rcu-rscsi F means that E and F are the rcu_read_unlock()
+       and rcu_read_lock() fence events delimiting some read-side
+       critical section.  (The 'i' at the end of the name emphasizes
+       that this relation is "inverted": It links the end of the
+       critical section to the start.)
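+
+As an immediate instance of these definitions: any synchronize_rcu()
+fence event S satisfies S ->rcu-gp S, and if L and U are the
+rcu_read_lock() and rcu_read_unlock() fence events delimiting a
+critical section, then U ->rcu-rscsi L.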
 
 If we think of the rcu-link relation as standing for an extended
-"before", then X ->gp Y ->rcu-link Z says that X executes before a
-grace period which ends before Z executes.  (In fact it covers more
-than this, because it also includes cases where X executes before a
-grace period and some store propagates to Z's CPU before Z executes
-but doesn't propagate to some other CPU until after the grace period
-ends.)  Similarly, X ->rscs Y ->rcu-link Z says that X is part of (or
-before the start of) a critical section which starts before Z
-executes.
-
-The LKMM goes on to define the rcu-fence relation as a sequence of gp
-and rscs links separated by rcu-link links, in which the number of gp
-links is >= the number of rscs links.  For example:
+"before", then X ->rcu-gp Y ->rcu-link Z roughly says that X is a
+grace period which ends before Z begins.  (In fact it covers more than
+this, because it also includes cases where some store propagates to
+Z's CPU before Z begins but doesn't propagate to some other CPU until
+after X ends.)  Similarly, X ->rcu-rscsi Y ->rcu-link Z says that X is
+the end of a critical section which starts before Z begins.
+
+The LKMM goes on to define the rcu-fence relation as a sequence of
+rcu-gp and rcu-rscsi links separated by rcu-link links, in which the
+number of rcu-gp links is >= the number of rcu-rscsi links.  For
+example:
 
-       X ->gp Y ->rcu-link Z ->rscs T ->rcu-link U ->gp V
+       X ->rcu-gp Y ->rcu-link Z ->rcu-rscsi T ->rcu-link U ->rcu-gp V
 
 would imply that X ->rcu-fence V, because this sequence contains two
-gp links and only one rscs link.  (It also implies that X ->rcu-fence T
-and Z ->rcu-fence V.)  On the other hand:
+rcu-gp links and one rcu-rscsi link.  (It also implies that
+X ->rcu-fence T and Z ->rcu-fence V.)  On the other hand:
 
-       X ->rscs Y ->rcu-link Z ->rscs T ->rcu-link U ->gp V
+       X ->rcu-rscsi Y ->rcu-link Z ->rcu-rscsi T ->rcu-link U ->rcu-gp V
 
 does not imply X ->rcu-fence V, because the sequence contains only
-one gp link but two rscs links.
+one rcu-gp link but two rcu-rscsi links.
 
 The rcu-fence relation is important because the Grace Period Guarantee
 means that rcu-fence acts kind of like a strong fence.  In particular,
-if W is a write and we have W ->rcu-fence Z, the Guarantee says that W
-will propagate to every CPU before Z executes.
+E ->rcu-fence F implies not only that E begins before F ends, but also
+that any write po-before E will propagate to every CPU before any
+instruction po-after F can execute.  (However, it does not imply that
+E must execute before F; in fact, each synchronize_rcu() fence event
+is linked to itself by rcu-fence as a degenerate case.)
 
 To prove this in full generality requires some intellectual effort.
 We'll consider just a very simple case:
 
-       W ->gp X ->rcu-link Y ->rscs Z.
+       G ->rcu-gp W ->rcu-link Z ->rcu-rscsi F.
 
-This formula means that there is a grace period G and a critical
-section C such that:
+This formula means that G and W are the same event (a grace period),
+and there are events X, Y and a read-side critical section C such that:
 
-       1. W is po-before G;
+       1. G = W is po-before or equal to X;
 
-       2. X is equal to or po-after G;
+       2. X comes "before" Y in some sense (including rfe, co and fr);
 
-       3. X comes "before" Y in some sense;
+       3. Y is po-before Z;
 
-       4. Y is po-before the end of C;
+       4. Z is the rcu_read_unlock() event marking the end of C;
 
-       5. Z is equal to or po-after the start of C.
+       5. F is the rcu_read_lock() event marking the start of C.
 
-From 2 - 4 we deduce that the grace period G ends before the critical
-section C.  Then the second part of the Grace Period Guarantee says
-not only that G starts before C does, but also that W (which executes
-on G's CPU before G starts) must propagate to every CPU before C
-starts.  In particular, W propagates to every CPU before Z executes
-(or finishes executing, in the case where Z is equal to the
-rcu_read_lock() fence event which starts C.)  This sort of reasoning
-can be expanded to handle all the situations covered by rcu-fence.
+From 1 - 4 we deduce that the grace period G ends before the critical
+section C.  Then part (2) of the Grace Period Guarantee says not only
+that G starts before C does, but also that any write which executes on
+G's CPU before G starts must propagate to every CPU before C starts.
+In particular, the write propagates to every CPU before F finishes
+executing and hence before any instruction po-after F can execute.
+This sort of reasoning can be extended to handle all the situations
+covered by rcu-fence.
 
 Finally, the LKMM defines the RCU-before (rb) relation in terms of
 rcu-fence.  This is done in essentially the same way as the pb
 relation was defined in terms of strong-fence.  We will omit the
-details; the end result is that E ->rb F implies E must execute before
-F, just as E ->pb F does (and for much the same reasons).
+details; the end result is that E ->rb F implies E must execute
+before F, just as E ->pb F does (and for much the same reasons).
 
 Putting this all together, the LKMM expresses the Grace Period
 Guarantee by requiring that the rb relation does not contain a cycle.
-Equivalently, this "rcu" axiom requires that there are no events E and
-F with E ->rcu-link F ->rcu-fence E.  Or to put it a third way, the
-axiom requires that there are no cycles consisting of gp and rscs
-alternating with rcu-link, where the number of gp links is >= the
-number of rscs links.
+Equivalently, this "rcu" axiom requires that there are no events E
+and F with E ->rcu-link F ->rcu-fence E.  Or to put it a third way,
+the axiom requires that there are no cycles consisting of rcu-gp and
+rcu-rscsi alternating with rcu-link, where the number of rcu-gp links
+is >= the number of rcu-rscsi links.
 
 Justifying the axiom isn't easy, but it is in fact a valid
 formalization of the Grace Period Guarantee.  We won't attempt to go
 through the detailed argument, but the following analysis gives a
-taste of what is involved.  Suppose we have a violation of the first
-part of the Guarantee: A critical section starts before a grace
-period, and some store propagates to the critical section's CPU before
-the end of the critical section but doesn't propagate to some other
-CPU until after the end of the grace period.
+taste of what is involved.  Suppose both parts of the Guarantee are
+violated: A critical section starts before a grace period, and some
+store propagates to the critical section's CPU before the end of the
+critical section but doesn't propagate to some other CPU until after
+the end of the grace period.
 
 Putting symbols to these ideas, let L and U be the rcu_read_lock() and
 rcu_read_unlock() fence events delimiting the critical section in
 question, and let S be the synchronize_rcu() fence event for the grace
 period.  Saying that the critical section starts before S means there
-are events E and F where E is po-after L (which marks the start of the
-critical section), E is "before" F in the sense of the rcu-link
-relation, and F is po-before the grace period S:
+are events Q and R where Q is po-after L (which marks the start of the
+critical section), Q is "before" R in the sense used by the rcu-link
+relation, and R is po-before the grace period S.  Thus we have:
 
-       L ->po E ->rcu-link F ->po S.
+       L ->rcu-link S.
 
-Let W be the store mentioned above, let Z come before the end of the
+Let W be the store mentioned above, let Y come before the end of the
 critical section and witness that W propagates to the critical
-section's CPU by reading from W, and let Y on some arbitrary CPU be a
-witness that W has not propagated to that CPU, where Y happens after
+section's CPU by reading from W, and let Z on some arbitrary CPU be a
+witness that W has not propagated to that CPU, where Z happens after
 some event X which is po-after S.  Symbolically, this amounts to:
 
-       S ->po X ->hb* Y ->fr W ->rf Z ->po U.
+       S ->po X ->hb* Z ->fr W ->rf Y ->po U.
 
-The fr link from Y to W indicates that W has not propagated to Y's CPU
-at the time that Y executes.  From this, it can be shown (see the
-discussion of the rcu-link relation earlier) that X and Z are related
-by rcu-link, yielding:
+The fr link from Z to W indicates that W has not propagated to Z's CPU
+at the time that Z executes.  From this, it can be shown (see the
+discussion of the rcu-link relation earlier) that S and U are related
+by rcu-link:
 
-       S ->po X ->rcu-link Z ->po U.
+       S ->rcu-link U.
 
-The formulas say that S is po-between F and X, hence F ->gp X.  They
-also say that Z comes before the end of the critical section and E
-comes after its start, hence Z ->rscs E.  From all this we obtain:
+Since S is a grace period we have S ->rcu-gp S, and since L and U are
+the start and end of the critical section C we have U ->rcu-rscsi L.
+From this we obtain:
 
-       F ->gp X ->rcu-link Z ->rscs E ->rcu-link F,
+       S ->rcu-gp S ->rcu-link U ->rcu-rscsi L ->rcu-link S,
 
 a forbidden cycle.  Thus the "rcu" axiom rules out this violation of
 the Grace Period Guarantee.
 
 For something a little more down-to-earth, let's see how the axiom
 works out in practice.  Consider the RCU code example from above, this
-time with statement labels added to the memory access instructions:
+time with statement labels added:
 
        int x, y;
 
        P0()
        {
-               rcu_read_lock();
-               W: WRITE_ONCE(x, 1);
-               X: WRITE_ONCE(y, 1);
-               rcu_read_unlock();
+               L: rcu_read_lock();
+               X: WRITE_ONCE(x, 1);
+               Y: WRITE_ONCE(y, 1);
+               U: rcu_read_unlock();
        }
 
        P1()
        {
                int r1, r2;
 
-               Y: r1 = READ_ONCE(x);
-               synchronize_rcu();
-               Z: r2 = READ_ONCE(y);
+               Z: r1 = READ_ONCE(x);
+               S: synchronize_rcu();
+               W: r2 = READ_ONCE(y);
        }
 
 
-If r2 = 0 at the end then P0's store at X overwrites the value that
-P1's load at Z reads from, so we have Z ->fre X and thus Z ->rcu-link X.
-In addition, there is a synchronize_rcu() between Y and Z, so therefore
-we have Y ->gp Z.
+If r2 = 0 at the end then P0's store at Y overwrites the value that
+P1's load at W reads from, so we have W ->fre Y.  Since S ->po W and
+also Y ->po U, we get S ->rcu-link U.  In addition, S ->rcu-gp S
+because S is a grace period.
 
-If r1 = 1 at the end then P1's load at Y reads from P0's store at W,
-so we have W ->rcu-link Y.  In addition, W and X are in the same critical
-section, so therefore we have X ->rscs W.
+If r1 = 1 at the end then P1's load at Z reads from P0's store at X,
+so we have X ->rfe Z.  Together with L ->po X and Z ->po S, this
+yields L ->rcu-link S.  And since L and U are the start and end of a
+critical section, we have U ->rcu-rscsi L.
 
-Then X ->rscs W ->rcu-link Y ->gp Z ->rcu-link X is a forbidden cycle,
-violating the "rcu" axiom.  Hence the outcome is not allowed by the
-LKMM, as we would expect.
+Then U ->rcu-rscsi L ->rcu-link S ->rcu-gp S ->rcu-link U is a
+forbidden cycle, violating the "rcu" axiom.  Hence the outcome is not
+allowed by the LKMM, as we would expect.
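+
+As a minimal sketch (the test name is illustrative, not one of the
+model's litmus-tests files), the same example can be written as a
+litmus test for herd7; the "exists" clause asks for the forbidden
+outcome, which the memory model should report as never occurring:
+
+	C rcu-gp-sketch
+
+	{}
+
+	P0(int *x, int *y)
+	{
+		rcu_read_lock();
+		WRITE_ONCE(*x, 1);
+		WRITE_ONCE(*y, 1);
+		rcu_read_unlock();
+	}
+
+	P1(int *x, int *y)
+	{
+		int r1;
+		int r2;
+
+		r1 = READ_ONCE(*x);
+		synchronize_rcu();
+		r2 = READ_ONCE(*y);
+	}
+
+	exists (1:r1=1 /\ 1:r2=0)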
 
 For contrast, let's see what can happen in a more complicated example:
 
@@ -1690,51 +1695,52 @@ For contrast, let's see what can happen in a more complicated example:
        {
                int r0;
 
-               rcu_read_lock();
-               W: r0 = READ_ONCE(x);
-               X: WRITE_ONCE(y, 1);
-               rcu_read_unlock();
+               L0: rcu_read_lock();
+                   r0 = READ_ONCE(x);
+                   WRITE_ONCE(y, 1);
+               U0: rcu_read_unlock();
        }
 
        P1()
        {
                int r1;
 
-               Y: r1 = READ_ONCE(y);
-               synchronize_rcu();
-               Z: WRITE_ONCE(z, 1);
+                   r1 = READ_ONCE(y);
+               S1: synchronize_rcu();
+                   WRITE_ONCE(z, 1);
        }
 
        P2()
        {
                int r2;
 
-               rcu_read_lock();
-               U: r2 = READ_ONCE(z);
-               V: WRITE_ONCE(x, 1);
-               rcu_read_unlock();
+               L2: rcu_read_lock();
+                   r2 = READ_ONCE(z);
+                   WRITE_ONCE(x, 1);
+               U2: rcu_read_unlock();
        }
 
 If r0 = r1 = r2 = 1 at the end, then similar reasoning to before shows
-that W ->rscs X ->rcu-link Y ->gp Z ->rcu-link U ->rscs V ->rcu-link W.
-However this cycle is not forbidden, because the sequence of relations
-contains fewer instances of gp (one) than of rscs (two).  Consequently
-the outcome is allowed by the LKMM.  The following instruction timing
-diagram shows how it might actually occur:
+that U0 ->rcu-rscsi L0 ->rcu-link S1 ->rcu-gp S1 ->rcu-link U2 ->rcu-rscsi
+L2 ->rcu-link U0.  However, this cycle is not forbidden, because the
+sequence of relations contains fewer instances of rcu-gp (one) than of
+rcu-rscsi (two).  Consequently the outcome is allowed by the LKMM.
+The following instruction timing diagram shows how it might actually
+occur:
 
 P0                     P1                      P2
 --------------------   --------------------    --------------------
 rcu_read_lock()
-X: WRITE_ONCE(y, 1)
-                       Y: r1 = READ_ONCE(y)
+WRITE_ONCE(y, 1)
+                       r1 = READ_ONCE(y)
                        synchronize_rcu() starts
                        .                       rcu_read_lock()
-                       .                       V: WRITE_ONCE(x, 1)
-W: r0 = READ_ONCE(x)   .
+                       .                       WRITE_ONCE(x, 1)
+r0 = READ_ONCE(x)      .
 rcu_read_unlock()      .
                        synchronize_rcu() ends
-                       Z: WRITE_ONCE(z, 1)
-                                               U: r2 = READ_ONCE(z)
+                       WRITE_ONCE(z, 1)
+                                               r2 = READ_ONCE(z)
                                                rcu_read_unlock()
 
 This requires P0 and P2 to execute their loads and stores out of
@@ -1744,6 +1750,15 @@ section in P0 both starts before P1's grace period does and ends
 before it does, and the critical section in P2 both starts after P1's
 grace period does and ends after it does.
 
+Addendum: The LKMM now supports SRCU (Sleepable Read-Copy-Update) in
+addition to normal RCU.  The ideas involved are much the same as
+above, with new relations srcu-gp and srcu-rscsi added to represent
+SRCU grace periods and read-side critical sections.  There is a
+restriction on the srcu-gp and srcu-rscsi links that can appear in an
+rcu-fence sequence (the srcu-rscsi links must be paired with srcu-gp
+links having the same SRCU domain with proper nesting); the details
+are relatively unimportant.
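+
+As a rough sketch of what these relations model (my_srcu, gp, p and
+new_p are illustrative names, not kernel symbols), an SRCU reader
+passes the value returned by srcu_read_lock() to the matching
+srcu_read_unlock(), and an updater's synchronize_srcu() waits only
+for readers of that same srcu_struct:
+
+	DEFINE_SRCU(my_srcu);
+
+	/* Reader */
+	idx = srcu_read_lock(&my_srcu);
+	p = srcu_dereference(gp, &my_srcu);
+	/* ... use p ... */
+	srcu_read_unlock(&my_srcu, idx);
+
+	/* Updater */
+	rcu_assign_pointer(gp, new_p);
+	synchronize_srcu(&my_srcu);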
+
 
 LOCKING
 -------
index 0f2c366518c68e2fa41697aa057fc2bb740698ee..2b87f3971548c06ac8a7f56fb2acd0c14cc63e0c 100644 (file)
@@ -20,13 +20,17 @@ that litmus test to be exercised within the Linux kernel.
 REQUIREMENTS
 ============
 
-Version 7.49 of the "herd7" and "klitmus7" tools must be downloaded
-separately:
+Version 7.52 or higher of the "herd7" and "klitmus7" tools must be
+downloaded separately:
 
   https://github.com/herd/herdtools7
 
 See "herdtools7/INSTALL.md" for installation instructions.
 
+Note that although these tools usually provide backwards compatibility,
+this is not absolutely guaranteed.  Therefore, if a later version does
+not work, please try using the exact version called out above.
+
 
 ==================
 BASIC USAGE: HERD7
@@ -221,8 +225,29 @@ The Linux-kernel memory model has the following limitations:
                additional call_rcu() process to the site of the
                emulated rcu-barrier().
 
-       e.      Sleepable RCU (SRCU) is not modeled.  It can be
-               emulated, but perhaps not simply.
+       e.      Although sleepable RCU (SRCU) is now modeled, there
+               are some subtle differences between its semantics and
+               those in the Linux kernel.  For example, the kernel
+               might interpret the following sequence as two partially
+               overlapping SRCU read-side critical sections:
+
+                        1  r1 = srcu_read_lock(&my_srcu);
+                        2  do_something_1();
+                        3  r2 = srcu_read_lock(&my_srcu);
+                        4  do_something_2();
+                        5  srcu_read_unlock(&my_srcu, r1);
+                        6  do_something_3();
+                        7  srcu_read_unlock(&my_srcu, r2);
+
+               In contrast, LKMM will interpret this as a nested pair of
+               SRCU read-side critical sections, with the outer critical
+               section spanning lines 1-7 and the inner critical section
+               spanning lines 3-5.
+
+               This difference would be more of a concern had anyone
+               identified a reasonable use case for partially overlapping
+               SRCU read-side critical sections.  For more information,
+               please see: https://paulmck.livejournal.com/40593.html
 
        f.      Reader-writer locking is not modeled.  It can be
                emulated in litmus tests using atomic read-modify-write
index 796513362c0522596f4fd9fdd967257839802ced..def9131d3d8e3292868458cd9baba066cda8ff14 100644 (file)
@@ -33,8 +33,14 @@ enum Barriers = 'wmb (*smp_wmb*) ||
                'after-unlock-lock (*smp_mb__after_unlock_lock*)
 instructions F[Barriers]
 
+(* SRCU *)
+enum SRCU = 'srcu-lock || 'srcu-unlock || 'sync-srcu
+instructions SRCU[SRCU]
+(* All srcu events *)
+let Srcu = Srcu-lock | Srcu-unlock | Sync-srcu
+
 (* Compute matching pairs of nested Rcu-lock and Rcu-unlock *)
-let matched = let rec
+let rcu-rscs = let rec
            unmatched-locks = Rcu-lock \ domain(matched)
        and unmatched-unlocks = Rcu-unlock \ range(matched)
        and unmatched = unmatched-locks | unmatched-unlocks
@@ -46,8 +52,27 @@ let matched = let rec
        in matched
 
 (* Validate nesting *)
-flag ~empty Rcu-lock \ domain(matched) as unbalanced-rcu-locking
-flag ~empty Rcu-unlock \ range(matched) as unbalanced-rcu-locking
+flag ~empty Rcu-lock \ domain(rcu-rscs) as unbalanced-rcu-locking
+flag ~empty Rcu-unlock \ range(rcu-rscs) as unbalanced-rcu-locking
+
+(* Compute matching pairs of nested Srcu-lock and Srcu-unlock *)
+let srcu-rscs = let rec
+           unmatched-locks = Srcu-lock \ domain(matched)
+       and unmatched-unlocks = Srcu-unlock \ range(matched)
+       and unmatched = unmatched-locks | unmatched-unlocks
+       and unmatched-po = ([unmatched] ; po ; [unmatched]) & loc
+       and unmatched-locks-to-unlocks =
+               ([unmatched-locks] ; po ; [unmatched-unlocks]) & loc
+       and matched = matched | (unmatched-locks-to-unlocks \
+               (unmatched-po ; unmatched-po))
+       in matched
+
+(* Validate nesting *)
+flag ~empty Srcu-lock \ domain(srcu-rscs) as unbalanced-srcu-locking
+flag ~empty Srcu-unlock \ range(srcu-rscs) as unbalanced-srcu-locking
+
+(* Check for use of synchronize_srcu() inside an RCU critical section *)
+flag ~empty rcu-rscs & (po ; [Sync-srcu] ; po) as invalid-sleep
 
-(* Outermost level of nesting only *)
-let crit = matched \ (po^-1 ; matched ; po^-1)
+(* Validate SRCU dynamic match *)
+flag ~empty different-values(srcu-rscs) as srcu-bad-nesting
index 8f23c74a96fdca4775bc463b46838c1d57c496ec..8dcb37835b613c69c90377be69d41f98ba8facd5 100644 (file)
@@ -33,7 +33,7 @@ let mb = ([M] ; fencerel(Mb) ; [M]) |
        ([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M]) |
        ([M] ; po ; [UL] ; (co | po) ; [LKW] ;
                fencerel(After-unlock-lock) ; [M])
-let gp = po ; [Sync-rcu] ; po?
+let gp = po ; [Sync-rcu | Sync-srcu] ; po?
 
 let strong-fence = mb | gp
 
@@ -91,32 +91,47 @@ acyclic pb as propagation
 (*******)
 
 (*
- * Effect of read-side critical section proceeds from the rcu_read_lock()
- * onward on the one hand and from the rcu_read_unlock() backwards on the
- * other hand.
+ * Effects of read-side critical sections proceed from the rcu_read_unlock()
+ * or srcu_read_unlock() backwards on the one hand, and from the
+ * rcu_read_lock() or srcu_read_lock() forwards on the other hand.
+ *
+ * In the definition of rcu-fence below, the po term at the left-hand side
+ * of each disjunct and the po? term at the right-hand end have been factored
+ * out.  They have been moved into the definitions of rcu-link and rb.
+ * This was necessary in order to apply the "& loc" tests correctly.
  *)
-let rscs = po ; crit^-1 ; po?
+let rcu-gp = [Sync-rcu]                (* Compare with gp *)
+let srcu-gp = [Sync-srcu]
+let rcu-rscsi = rcu-rscs^-1
+let srcu-rscsi = srcu-rscs^-1
 
 (*
  * The synchronize_rcu() strong fence is special in that it can order not
  * one but two non-rf relations, but only in conjunction with an RCU
  * read-side critical section.
  *)
-let rcu-link = hb* ; pb* ; prop
+let rcu-link = po? ; hb* ; pb* ; prop ; po
 
 (*
  * Any sequence containing at least as many grace periods as RCU read-side
  * critical sections (joined by rcu-link) acts as a generalized strong fence.
+ * Likewise for SRCU grace periods and read-side critical sections, provided
+ * the synchronize_srcu() and srcu_read_[un]lock() calls refer to the same
+ * struct srcu_struct location.
  *)
-let rec rcu-fence = gp |
-       (gp ; rcu-link ; rscs) |
-       (rscs ; rcu-link ; gp) |
-       (gp ; rcu-link ; rcu-fence ; rcu-link ; rscs) |
-       (rscs ; rcu-link ; rcu-fence ; rcu-link ; gp) |
+let rec rcu-fence = rcu-gp | srcu-gp |
+       (rcu-gp ; rcu-link ; rcu-rscsi) |
+       ((srcu-gp ; rcu-link ; srcu-rscsi) & loc) |
+       (rcu-rscsi ; rcu-link ; rcu-gp) |
+       ((srcu-rscsi ; rcu-link ; srcu-gp) & loc) |
+       (rcu-gp ; rcu-link ; rcu-fence ; rcu-link ; rcu-rscsi) |
+       ((srcu-gp ; rcu-link ; rcu-fence ; rcu-link ; srcu-rscsi) & loc) |
+       (rcu-rscsi ; rcu-link ; rcu-fence ; rcu-link ; rcu-gp) |
+       ((srcu-rscsi ; rcu-link ; rcu-fence ; rcu-link ; srcu-gp) & loc) |
        (rcu-fence ; rcu-link ; rcu-fence)
 
 (* rb orders instructions just as pb does *)
-let rb = prop ; rcu-fence ; hb* ; pb*
+let rb = prop ; po ; rcu-fence ; po? ; hb* ; pb*
 
 irreflexive rb as rcu
 
index b27911cc087d426c49c22e3d3c41cd63e4230d07..551eeaa389d40cc952d2db833c1837a9265ced15 100644 (file)
@@ -47,6 +47,12 @@ rcu_read_unlock() { __fence{rcu-unlock}; }
 synchronize_rcu() { __fence{sync-rcu}; }
 synchronize_rcu_expedited() { __fence{sync-rcu}; }
 
+// SRCU
+srcu_read_lock(X)  __srcu{srcu-lock}(X)
+srcu_read_unlock(X,Y) { __srcu{srcu-unlock}(X,Y); }
+synchronize_srcu(X)  { __srcu{sync-srcu}(X); }
+synchronize_srcu_expedited(X)  { __srcu{sync-srcu}(X); }
+
 // Atomic
 atomic_read(X) READ_ONCE(*X)
 atomic_set(X,V) { WRITE_ONCE(*X,V); }
index 305ded17e741193ca98488ca9ad83a0eedecda85..a059d1a6d8a296abed33d50e919c7b92bb67091a 100644 (file)
@@ -6,9 +6,6 @@
 
 (*
  * Generate coherence orders and handle lock operations
- *
- * Warning: spin_is_locked() crashes herd7 versions strictly before 7.48.
- * spin_is_locked() is functional from herd7 version 7.49.
  *)
 
 include "cross.cat"
index c9d038f91af6b345044bb116680a66f140d2ead1..53f8be0f4a1f763e613b649aeac98399bd34eb69 100644 (file)
@@ -25,14 +25,17 @@ LIBSUBCMD           = $(LIBSUBCMD_OUTPUT)libsubcmd.a
 OBJTOOL    := $(OUTPUT)objtool
 OBJTOOL_IN := $(OBJTOOL)-in.o
 
+LIBELF_FLAGS := $(shell pkg-config libelf --cflags 2>/dev/null)
+LIBELF_LIBS  := $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
+
 all: $(OBJTOOL)
 
 INCLUDES := -I$(srctree)/tools/include \
            -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
            -I$(srctree)/tools/objtool/arch/$(ARCH)/include
 WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
-CFLAGS   += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES)
-LDFLAGS  += -lelf $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
+CFLAGS   += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
+LDFLAGS  += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
 
 # Allow old libelf to be used:
 elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
index b0d7dc3d71b5ac21e8acb36b1c1f38bcf06879c4..7a111a77b7aa418cb2c50edaf62a470150f7f888 100644 (file)
 #define INSN_STACK             8
 #define INSN_BUG               9
 #define INSN_NOP               10
-#define INSN_OTHER             11
+#define INSN_STAC              11
+#define INSN_CLAC              12
+#define INSN_STD               13
+#define INSN_CLD               14
+#define INSN_OTHER             15
 #define INSN_LAST              INSN_OTHER
 
 enum op_dest_type {
@@ -41,6 +45,7 @@ enum op_dest_type {
        OP_DEST_REG_INDIRECT,
        OP_DEST_MEM,
        OP_DEST_PUSH,
+       OP_DEST_PUSHF,
        OP_DEST_LEAVE,
 };
 
@@ -55,6 +60,7 @@ enum op_src_type {
        OP_SRC_REG_INDIRECT,
        OP_SRC_CONST,
        OP_SRC_POP,
+       OP_SRC_POPF,
        OP_SRC_ADD,
        OP_SRC_AND,
 };
index 540a209b78ab3cd6ae3b972c57b338dc0aa9b58d..472e991f6512d26bcfd81e660fc8f6244a3eb24e 100644 (file)
@@ -357,19 +357,26 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
                /* pushf */
                *type = INSN_STACK;
                op->src.type = OP_SRC_CONST;
-               op->dest.type = OP_DEST_PUSH;
+               op->dest.type = OP_DEST_PUSHF;
                break;
 
        case 0x9d:
                /* popf */
                *type = INSN_STACK;
-               op->src.type = OP_SRC_POP;
+               op->src.type = OP_SRC_POPF;
                op->dest.type = OP_DEST_MEM;
                break;
 
        case 0x0f:
 
-               if (op2 >= 0x80 && op2 <= 0x8f) {
+               if (op2 == 0x01) {
+
+                       if (modrm == 0xca)
+                               *type = INSN_CLAC;
+                       else if (modrm == 0xcb)
+                               *type = INSN_STAC;
+
+               } else if (op2 >= 0x80 && op2 <= 0x8f) {
 
                        *type = INSN_JUMP_CONDITIONAL;
 
@@ -444,6 +451,14 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
                *type = INSN_CALL;
                break;
 
+       case 0xfc:
+               *type = INSN_CLD;
+               break;
+
+       case 0xfd:
+               *type = INSN_STD;
+               break;
+
        case 0xff:
                if (modrm_reg == 2 || modrm_reg == 3)
 
index 694abc628e9b3060b2252c1e8c86af4d6176e518..f3b378126011f5eeb8b77e330e6c2a0ae2624a50 100644 (file)
@@ -29,7 +29,7 @@
 #include "builtin.h"
 #include "check.h"
 
-bool no_fp, no_unreachable, retpoline, module;
+bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess;
 
 static const char * const check_usage[] = {
        "objtool check [<options>] file.o",
@@ -41,6 +41,8 @@ const struct option check_options[] = {
        OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"),
        OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"),
        OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"),
+       OPT_BOOLEAN('b', "backtrace", &backtrace, "unwind on error"),
+       OPT_BOOLEAN('a', "uaccess", &uaccess, "enable uaccess checking"),
        OPT_END(),
 };
 
index 28ff40e19a1413823b9b06ae4d1f1b42d922e850..69762f9c5602cf43bc9aac083d89f52d361c3013 100644 (file)
@@ -20,7 +20,7 @@
 #include <subcmd/parse-options.h>
 
 extern const struct option check_options[];
-extern bool no_fp, no_unreachable, retpoline, module;
+extern bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess;
 
 extern int cmd_check(int argc, const char **argv);
 extern int cmd_orc(int argc, const char **argv);
index 0414a0d522621d4ca973240979e89c07d6cd4f8a..ac743a1d53ab321a8a664fab6ef9a4f3a04b6dfa 100644 (file)
@@ -31,6 +31,7 @@
 struct alternative {
        struct list_head list;
        struct instruction *insn;
+       bool skip_orig;
 };
 
 const char *objname;
@@ -104,29 +105,6 @@ static struct instruction *next_insn_same_func(struct objtool_file *file,
        for (insn = next_insn_same_sec(file, insn); insn;               \
             insn = next_insn_same_sec(file, insn))
 
-/*
- * Check if the function has been manually whitelisted with the
- * STACK_FRAME_NON_STANDARD macro, or if it should be automatically whitelisted
- * due to its use of a context switching instruction.
- */
-static bool ignore_func(struct objtool_file *file, struct symbol *func)
-{
-       struct rela *rela;
-
-       /* check for STACK_FRAME_NON_STANDARD */
-       if (file->whitelist && file->whitelist->rela)
-               list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) {
-                       if (rela->sym->type == STT_SECTION &&
-                           rela->sym->sec == func->sec &&
-                           rela->addend == func->offset)
-                               return true;
-                       if (rela->sym->type == STT_FUNC && rela->sym == func)
-                               return true;
-               }
-
-       return false;
-}
-
 /*
  * This checks to see if the given function is a "noreturn" function.
  *
@@ -165,6 +143,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
                "fortify_panic",
                "usercopy_abort",
                "machine_real_restart",
+               "rewind_stack_do_exit",
        };
 
        if (func->bind == STB_WEAK)
@@ -436,18 +415,107 @@ static void add_ignores(struct objtool_file *file)
        struct instruction *insn;
        struct section *sec;
        struct symbol *func;
+       struct rela *rela;
 
-       for_each_sec(file, sec) {
-               list_for_each_entry(func, &sec->symbol_list, list) {
-                       if (func->type != STT_FUNC)
-                               continue;
+       sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
+       if (!sec)
+               return;
+
+       list_for_each_entry(rela, &sec->rela_list, list) {
+               switch (rela->sym->type) {
+               case STT_FUNC:
+                       func = rela->sym;
+                       break;
 
-                       if (!ignore_func(file, func))
+               case STT_SECTION:
+                       func = find_symbol_by_offset(rela->sym->sec, rela->addend);
+                       if (!func || func->type != STT_FUNC)
                                continue;
+                       break;
 
-                       func_for_each_insn_all(file, func, insn)
-                               insn->ignore = true;
+               default:
+                       WARN("unexpected relocation symbol type in %s: %d", sec->name, rela->sym->type);
+                       continue;
                }
+
+               func_for_each_insn_all(file, func, insn)
+                       insn->ignore = true;
+       }
+}
+
+/*
+ * This is a whitelist of functions that are allowed to be called with AC set.
+ * The list is meant to be minimal and only contains compiler instrumentation
+ * ABI and a few functions used to implement *_{to,from}_user() functions.
+ *
+ * These functions must not directly change AC, but may PUSHF/POPF.
+ */
+static const char *uaccess_safe_builtin[] = {
+       /* KASAN */
+       "kasan_report",
+       "check_memory_region",
+       /* KASAN out-of-line */
+       "__asan_loadN_noabort",
+       "__asan_load1_noabort",
+       "__asan_load2_noabort",
+       "__asan_load4_noabort",
+       "__asan_load8_noabort",
+       "__asan_load16_noabort",
+       "__asan_storeN_noabort",
+       "__asan_store1_noabort",
+       "__asan_store2_noabort",
+       "__asan_store4_noabort",
+       "__asan_store8_noabort",
+       "__asan_store16_noabort",
+       /* KASAN in-line */
+       "__asan_report_load_n_noabort",
+       "__asan_report_load1_noabort",
+       "__asan_report_load2_noabort",
+       "__asan_report_load4_noabort",
+       "__asan_report_load8_noabort",
+       "__asan_report_load16_noabort",
+       "__asan_report_store_n_noabort",
+       "__asan_report_store1_noabort",
+       "__asan_report_store2_noabort",
+       "__asan_report_store4_noabort",
+       "__asan_report_store8_noabort",
+       "__asan_report_store16_noabort",
+       /* KCOV */
+       "write_comp_data",
+       "__sanitizer_cov_trace_pc",
+       "__sanitizer_cov_trace_const_cmp1",
+       "__sanitizer_cov_trace_const_cmp2",
+       "__sanitizer_cov_trace_const_cmp4",
+       "__sanitizer_cov_trace_const_cmp8",
+       "__sanitizer_cov_trace_cmp1",
+       "__sanitizer_cov_trace_cmp2",
+       "__sanitizer_cov_trace_cmp4",
+       "__sanitizer_cov_trace_cmp8",
+       /* UBSAN */
+       "ubsan_type_mismatch_common",
+       "__ubsan_handle_type_mismatch",
+       "__ubsan_handle_type_mismatch_v1",
+       /* misc */
+       "csum_partial_copy_generic",
+       "__memcpy_mcsafe",
+       "ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
+       NULL
+};
+
+static void add_uaccess_safe(struct objtool_file *file)
+{
+       struct symbol *func;
+       const char **name;
+
+       if (!uaccess)
+               return;
+
+       for (name = uaccess_safe_builtin; *name; name++) {
+               func = find_symbol_by_name(file->elf, *name);
+               if (!func)
+                       continue;
+
+               func->alias->uaccess_safe = true;
        }
 }
 
@@ -457,13 +525,13 @@ static void add_ignores(struct objtool_file *file)
  * But it at least allows objtool to understand the control flow *around* the
  * retpoline.
  */
-static int add_nospec_ignores(struct objtool_file *file)
+static int add_ignore_alternatives(struct objtool_file *file)
 {
        struct section *sec;
        struct rela *rela;
        struct instruction *insn;
 
-       sec = find_section_by_name(file->elf, ".rela.discard.nospec");
+       sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
        if (!sec)
                return 0;
 
@@ -475,7 +543,7 @@ static int add_nospec_ignores(struct objtool_file *file)
 
                insn = find_insn(file, rela->sym->sec, rela->addend);
                if (!insn) {
-                       WARN("bad .discard.nospec entry");
+                       WARN("bad .discard.ignore_alts entry");
                        return -1;
                }
 
@@ -524,7 +592,8 @@ static int add_jump_destinations(struct objtool_file *file)
                        continue;
                } else {
                        /* sibling call */
-                       insn->jump_dest = 0;
+                       insn->call_dest = rela->sym;
+                       insn->jump_dest = NULL;
                        continue;
                }
 
@@ -546,25 +615,38 @@ static int add_jump_destinations(struct objtool_file *file)
                }
 
                /*
-                * For GCC 8+, create parent/child links for any cold
-                * subfunctions.  This is _mostly_ redundant with a similar
-                * initialization in read_symbols().
-                *
-                * If a function has aliases, we want the *first* such function
-                * in the symbol table to be the subfunction's parent.  In that
-                * case we overwrite the initialization done in read_symbols().
-                *
-                * However this code can't completely replace the
-                * read_symbols() code because this doesn't detect the case
-                * where the parent function's only reference to a subfunction
-                * is through a switch table.
+                * Cross-function jump.
                 */
                if (insn->func && insn->jump_dest->func &&
-                   insn->func != insn->jump_dest->func &&
-                   !strstr(insn->func->name, ".cold.") &&
-                   strstr(insn->jump_dest->func->name, ".cold.")) {
-                       insn->func->cfunc = insn->jump_dest->func;
-                       insn->jump_dest->func->pfunc = insn->func;
+                   insn->func != insn->jump_dest->func) {
+
+                       /*
+                        * For GCC 8+, create parent/child links for any cold
+                        * subfunctions.  This is _mostly_ redundant with a
+                        * similar initialization in read_symbols().
+                        *
+                        * If a function has aliases, we want the *first* such
+                        * function in the symbol table to be the subfunction's
+                        * parent.  In that case we overwrite the
+                        * initialization done in read_symbols().
+                        *
+                        * However this code can't completely replace the
+                        * read_symbols() code because this doesn't detect the
+                        * case where the parent function's only reference to a
+                        * subfunction is through a switch table.
+                        */
+                       if (!strstr(insn->func->name, ".cold.") &&
+                           strstr(insn->jump_dest->func->name, ".cold.")) {
+                               insn->func->cfunc = insn->jump_dest->func;
+                               insn->jump_dest->func->pfunc = insn->func;
+
+                       } else if (insn->jump_dest->func->pfunc != insn->func->pfunc &&
+                                  insn->jump_dest->offset == insn->jump_dest->func->offset) {
+
+                               /* sibling call */
+                               insn->call_dest = insn->jump_dest->func;
+                               insn->jump_dest = NULL;
+                       }
                }
        }
 
@@ -633,9 +715,6 @@ static int add_call_destinations(struct objtool_file *file)
  *    conditionally jumps to the _end_ of the entry.  We have to modify these
  *    jumps' destinations to point back to .text rather than the end of the
  *    entry in .altinstr_replacement.
- *
- * 4. It has been requested that we don't validate the !POPCNT feature path
- *    which is a "very very small percentage of machines".
  */
 static int handle_group_alt(struct objtool_file *file,
                            struct special_alt *special_alt,
@@ -651,9 +730,6 @@ static int handle_group_alt(struct objtool_file *file,
                if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
                        break;
 
-               if (special_alt->skip_orig)
-                       insn->type = INSN_NOP;
-
                insn->alt_group = true;
                last_orig_insn = insn;
        }
@@ -695,6 +771,7 @@ static int handle_group_alt(struct objtool_file *file,
                last_new_insn = insn;
 
                insn->ignore = orig_insn->ignore_alts;
+               insn->func = orig_insn->func;
 
                if (insn->type != INSN_JUMP_CONDITIONAL &&
                    insn->type != INSN_JUMP_UNCONDITIONAL)
@@ -817,6 +894,8 @@ static int add_special_section_alts(struct objtool_file *file)
                }
 
                alt->insn = new_insn;
+               alt->skip_orig = special_alt->skip_orig;
+               orig_insn->ignore_alts |= special_alt->skip_alt;
                list_add_tail(&alt->list, &orig_insn->alts);
 
                list_del(&special_alt->list);
@@ -1238,8 +1317,9 @@ static int decode_sections(struct objtool_file *file)
                return ret;
 
        add_ignores(file);
+       add_uaccess_safe(file);
 
-       ret = add_nospec_ignores(file);
+       ret = add_ignore_alternatives(file);
        if (ret)
                return ret;
 
@@ -1319,11 +1399,11 @@ static int update_insn_state_regs(struct instruction *insn, struct insn_state *s
                return 0;
 
        /* push */
-       if (op->dest.type == OP_DEST_PUSH)
+       if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
                cfa->offset += 8;
 
        /* pop */
-       if (op->src.type == OP_SRC_POP)
+       if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
                cfa->offset -= 8;
 
        /* add immediate to sp */
@@ -1580,6 +1660,7 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
                        break;
 
                case OP_SRC_POP:
+               case OP_SRC_POPF:
                        if (!state->drap && op->dest.type == OP_DEST_REG &&
                            op->dest.reg == cfa->base) {
 
@@ -1644,6 +1725,7 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
                break;
 
        case OP_DEST_PUSH:
+       case OP_DEST_PUSHF:
                state->stack_size += 8;
                if (cfa->base == CFI_SP)
                        cfa->offset += 8;
@@ -1734,7 +1816,7 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
                break;
 
        case OP_DEST_MEM:
-               if (op->src.type != OP_SRC_POP) {
+               if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
                        WARN_FUNC("unknown stack-related memory operation",
                                  insn->sec, insn->offset);
                        return -1;
@@ -1798,6 +1880,50 @@ static bool insn_state_match(struct instruction *insn, struct insn_state *state)
        return false;
 }
 
+static inline bool func_uaccess_safe(struct symbol *func)
+{
+       if (func)
+               return func->alias->uaccess_safe;
+
+       return false;
+}
+
+static inline const char *insn_dest_name(struct instruction *insn)
+{
+       if (insn->call_dest)
+               return insn->call_dest->name;
+
+       return "{dynamic}";
+}
+
+static int validate_call(struct instruction *insn, struct insn_state *state)
+{
+       if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
+               WARN_FUNC("call to %s() with UACCESS enabled",
+                               insn->sec, insn->offset, insn_dest_name(insn));
+               return 1;
+       }
+
+       if (state->df) {
+               WARN_FUNC("call to %s() with DF set",
+                               insn->sec, insn->offset, insn_dest_name(insn));
+               return 1;
+       }
+
+       return 0;
+}
+
+static int validate_sibling_call(struct instruction *insn, struct insn_state *state)
+{
+       if (has_modified_stack_frame(state)) {
+               WARN_FUNC("sibling call from callable instruction with modified stack frame",
+                               insn->sec, insn->offset);
+               return 1;
+       }
+
+       return validate_call(insn, state);
+}
+
 /*
  * Follow the branch starting at the given instruction, and recursively follow
  * any other branches (jumps).  Meanwhile, track the frame pointer state at
@@ -1843,7 +1969,9 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
                        if (!insn->hint && !insn_state_match(insn, &state))
                                return 1;
 
-                       return 0;
+                       /* If we were here with AC=0, but now have AC=1, go again */
+                       if (insn->state.uaccess || !state.uaccess)
+                               return 0;
                }
 
                if (insn->hint) {
@@ -1892,16 +2020,42 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
                insn->visited = true;
 
                if (!insn->ignore_alts) {
+                       bool skip_orig = false;
+
                        list_for_each_entry(alt, &insn->alts, list) {
+                               if (alt->skip_orig)
+                                       skip_orig = true;
+
                                ret = validate_branch(file, alt->insn, state);
-                               if (ret)
-                                       return 1;
+                               if (ret) {
+                                       if (backtrace)
+                                               BT_FUNC("(alt)", insn);
+                                       return ret;
+                               }
                        }
+
+                       if (skip_orig)
+                               return 0;
                }
 
                switch (insn->type) {
 
                case INSN_RETURN:
+                       if (state.uaccess && !func_uaccess_safe(func)) {
+                               WARN_FUNC("return with UACCESS enabled", sec, insn->offset);
+                               return 1;
+                       }
+
+                       if (!state.uaccess && func_uaccess_safe(func)) {
+                               WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function", sec, insn->offset);
+                               return 1;
+                       }
+
+                       if (state.df) {
+                               WARN_FUNC("return with DF set", sec, insn->offset);
+                               return 1;
+                       }
+
                        if (func && has_modified_stack_frame(&state)) {
                                WARN_FUNC("return with modified stack frame",
                                          sec, insn->offset);
@@ -1917,17 +2071,22 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
                        return 0;
 
                case INSN_CALL:
-                       if (is_fentry_call(insn))
-                               break;
+               case INSN_CALL_DYNAMIC:
+                       ret = validate_call(insn, &state);
+                       if (ret)
+                               return ret;
 
-                       ret = dead_end_function(file, insn->call_dest);
-                       if (ret == 1)
-                               return 0;
-                       if (ret == -1)
-                               return 1;
+                       if (insn->type == INSN_CALL) {
+                               if (is_fentry_call(insn))
+                                       break;
+
+                               ret = dead_end_function(file, insn->call_dest);
+                               if (ret == 1)
+                                       return 0;
+                               if (ret == -1)
+                                       return 1;
+                       }
 
-                       /* fallthrough */
-               case INSN_CALL_DYNAMIC:
                        if (!no_fp && func && !has_valid_stack_frame(&state)) {
                                WARN_FUNC("call without frame pointer save/setup",
                                          sec, insn->offset);
@@ -1937,18 +2096,21 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
 
                case INSN_JUMP_CONDITIONAL:
                case INSN_JUMP_UNCONDITIONAL:
-                       if (insn->jump_dest &&
-                           (!func || !insn->jump_dest->func ||
-                            insn->jump_dest->func->pfunc == func)) {
-                               ret = validate_branch(file, insn->jump_dest,
-                                                     state);
+                       if (func && !insn->jump_dest) {
+                               ret = validate_sibling_call(insn, &state);
                                if (ret)
-                                       return 1;
+                                       return ret;
 
-                       } else if (func && has_modified_stack_frame(&state)) {
-                               WARN_FUNC("sibling call from callable instruction with modified stack frame",
-                                         sec, insn->offset);
-                               return 1;
+                       } else if (insn->jump_dest &&
+                                  (!func || !insn->jump_dest->func ||
+                                   insn->jump_dest->func->pfunc == func)) {
+                               ret = validate_branch(file, insn->jump_dest,
+                                                     state);
+                               if (ret) {
+                                       if (backtrace)
+                                               BT_FUNC("(branch)", insn);
+                                       return ret;
+                               }
                        }
 
                        if (insn->type == INSN_JUMP_UNCONDITIONAL)
@@ -1957,11 +2119,10 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
                        break;
 
                case INSN_JUMP_DYNAMIC:
-                       if (func && list_empty(&insn->alts) &&
-                           has_modified_stack_frame(&state)) {
-                               WARN_FUNC("sibling call from callable instruction with modified stack frame",
-                                         sec, insn->offset);
-                               return 1;
+                       if (func && list_empty(&insn->alts)) {
+                               ret = validate_sibling_call(insn, &state);
+                               if (ret)
+                                       return ret;
                        }
 
                        return 0;
@@ -1978,6 +2139,63 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
                        if (update_insn_state(insn, &state))
                                return 1;
 
+                       if (insn->stack_op.dest.type == OP_DEST_PUSHF) {
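+                               /*
+                                * uaccess_stack is a bit-stack of saved AC
+                                * states: PUSHF shifts the current state in
+                                * at bit 0, POPF shifts it back out.  An
+                                * empty stack is denoted by 0; the
+                                * bottom-of-stack sentinel bit caps the
+                                * nesting depth at 31 pushes.
+                                */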
+                               if (!state.uaccess_stack) {
+                                       state.uaccess_stack = 1;
+                               } else if (state.uaccess_stack >> 31) {
+                                       WARN_FUNC("PUSHF stack exhausted", sec, insn->offset);
+                                       return 1;
+                               }
+                               state.uaccess_stack <<= 1;
+                               state.uaccess_stack  |= state.uaccess;
+                       }
+
+                       if (insn->stack_op.src.type == OP_SRC_POPF) {
+                               if (state.uaccess_stack) {
+                                       state.uaccess = state.uaccess_stack & 1;
+                                       state.uaccess_stack >>= 1;
+                                       if (state.uaccess_stack == 1)
+                                               state.uaccess_stack = 0;
+                               }
+                       }
+
+                       break;
+
+               case INSN_STAC:
+                       if (state.uaccess) {
+                               WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
+                               return 1;
+                       }
+
+                       state.uaccess = true;
+                       break;
+
+               case INSN_CLAC:
+                       if (!state.uaccess && insn->func) {
+                               WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
+                               return 1;
+                       }
+
+                       if (func_uaccess_safe(func) && !state.uaccess_stack) {
+                               WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
+                               return 1;
+                       }
+
+                       state.uaccess = false;
+                       break;
+
+               case INSN_STD:
+                       if (state.df)
+                               WARN_FUNC("recursive STD", sec, insn->offset);
+
+                       state.df = true;
+                       break;
+
+               case INSN_CLD:
+                       if (!state.df && insn->func)
+                               WARN_FUNC("redundant CLD", sec, insn->offset);
+
+                       state.df = false;
                        break;
 
                default:
@@ -2014,6 +2232,8 @@ static int validate_unwind_hints(struct objtool_file *file)
        for_each_insn(file, insn) {
                if (insn->hint && !insn->visited) {
                        ret = validate_branch(file, insn, state);
+                       if (ret && backtrace)
+                               BT_FUNC("<=== (hint)", insn);
                        warnings += ret;
                }
        }
@@ -2141,7 +2361,11 @@ static int validate_functions(struct objtool_file *file)
                        if (!insn || insn->ignore)
                                continue;
 
+                       state.uaccess = func->alias->uaccess_safe;
+
                        ret = validate_branch(file, insn, state);
+                       if (ret && backtrace)
+                               BT_FUNC("<=== (func)", insn);
                        warnings += ret;
                }
        }
@@ -2184,9 +2408,10 @@ static void cleanup(struct objtool_file *file)
        elf_close(file->elf);
 }
 
+static struct objtool_file file;
+
 int check(const char *_objname, bool orc)
 {
-       struct objtool_file file;
        int ret, warnings = 0;
 
        objname = _objname;
@@ -2197,7 +2422,6 @@ int check(const char *_objname, bool orc)
 
        INIT_LIST_HEAD(&file.insn_list);
        hash_init(file.insn_hash);
-       file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard");
        file.c_file = find_section_by_name(file.elf, ".comment");
        file.ignore_unreachables = no_unreachable;
        file.hints = false;
index e6e8a655b5563e84bcd5d67eee13a446db81cc31..71e54f97dbcdca023668178dbfb6cd71acc75cc5 100644 (file)
@@ -31,7 +31,8 @@ struct insn_state {
        int stack_size;
        unsigned char type;
        bool bp_scratch;
-       bool drap, end;
+       bool drap, end, uaccess, df;
+       unsigned int uaccess_stack;
        int drap_reg, drap_offset;
        struct cfi_reg vals[CFI_NUM_REGS];
 };
@@ -60,7 +61,6 @@ struct objtool_file {
        struct elf *elf;
        struct list_head insn_list;
        DECLARE_HASHTABLE(insn_hash, 16);
-       struct section *whitelist;
        bool ignore_unreachables, c_file, hints, rodata;
 };
 
index b8f3cca8e58b4ec327876c7fd3173a4a3ae6c31d..dd198d53387df0691c2cf3c9693bbb579e12c5c1 100644 (file)
@@ -219,7 +219,7 @@ static int read_sections(struct elf *elf)
 static int read_symbols(struct elf *elf)
 {
        struct section *symtab, *sec;
-       struct symbol *sym, *pfunc;
+       struct symbol *sym, *pfunc, *alias;
        struct list_head *entry, *tmp;
        int symbols_nr, i;
        char *coldstr;
@@ -239,6 +239,7 @@ static int read_symbols(struct elf *elf)
                        return -1;
                }
                memset(sym, 0, sizeof(*sym));
+               alias = sym;
 
                sym->idx = i;
 
@@ -288,11 +289,17 @@ static int read_symbols(struct elf *elf)
                                break;
                        }
 
-                       if (sym->offset == s->offset && sym->len >= s->len) {
-                               entry = tmp;
-                               break;
+                       if (sym->offset == s->offset) {
+                               if (sym->len == s->len && alias == sym)
+                                       alias = s;
+
+                               if (sym->len >= s->len) {
+                                       entry = tmp;
+                                       break;
+                               }
                        }
                }
+               sym->alias = alias;
                list_add(&sym->list, entry);
                hash_add(sym->sec->symbol_hash, &sym->hash, sym->idx);
        }
index bc97ed86b9cd8ebd3fc8e9e1512d8d06b3e96d14..2cc2ed49322d10c97ae450f2e373865b04647d92 100644 (file)
@@ -61,7 +61,8 @@ struct symbol {
        unsigned char bind, type;
        unsigned long offset;
        unsigned int len;
-       struct symbol *pfunc, *cfunc;
+       struct symbol *pfunc, *cfunc, *alias;
+       bool uaccess_safe;
 };
 
 struct rela {
index 50af4e1274b39d20758208a4944453c46b3aa448..4e50563d87c6466aca09ec1ec756503d97e1921a 100644 (file)
@@ -23,6 +23,7 @@
 #include <stdlib.h>
 #include <string.h>
 
+#include "builtin.h"
 #include "special.h"
 #include "warn.h"
 
@@ -42,6 +43,7 @@
 #define ALT_NEW_LEN_OFFSET     11
 
 #define X86_FEATURE_POPCNT (4*32+23)
+#define X86_FEATURE_SMAP   (9*32+20)
 
 struct special_entry {
        const char *sec;
@@ -110,6 +112,22 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
                 */
                if (feature == X86_FEATURE_POPCNT)
                        alt->skip_orig = true;
+
+                * If UACCESS validation is enabled, force that alternative;
+                * otherwise force it the other way.
+                *
+                * What we want to avoid is having both the original and the
+                * alternative code flow at the same time; in that case we can
+                * find paths that see the STAC but take the NOP instead of
+                * CLAC, and the other way around.
+                * CLAC and the other way around.
+                */
+               if (feature == X86_FEATURE_SMAP) {
+                       if (uaccess)
+                               alt->skip_orig = true;
+                       else
+                               alt->skip_alt = true;
+               }
        }
 
        orig_rela = find_rela_by_dest(sec, offset + entry->orig);
index fad1d092f679e30129983071f058cfcde4234e1f..d5c062e718eff7e7f77008b0c2e75171ee91fff5 100644 (file)
@@ -26,6 +26,7 @@ struct special_alt {
 
        bool group;
        bool skip_orig;
+       bool skip_alt;
        bool jump_or_nop;
 
        struct section *orig_sec;
index afd9f7a05f6d1ead695f3e26b79f9001cae5b6c6..f4fbb972b611c45a360f1095dbada9e98adcfa26 100644 (file)
@@ -64,6 +64,14 @@ static inline char *offstr(struct section *sec, unsigned long offset)
        free(_str);                                     \
 })
 
+#define BT_FUNC(format, insn, ...)                     \
+({                                                     \
+       struct instruction *_insn = (insn);             \
+       char *_str = offstr(_insn->sec, _insn->offset); \
+       WARN("  %s: " format, _str, ##__VA_ARGS__);     \
+       free(_str);                                     \
+})
+
 #define WARN_ELF(format, ...)                          \
        WARN(format ": %s", ##__VA_ARGS__, elf_errmsg(-1))
 
index f6fc6507ba55ecade8065382a10723f67f914529..3766886c4bca3d967ed2e714982be5016c4816f4 100644 (file)
@@ -47,3 +47,27 @@ Those objects are then used in final linking:
 
 NOTE this description is omitting other libraries involved, only
      focusing on build framework outcomes
+
+3) Build with ASan or UBSan
+===========================
+  $ cd tools/perf
+  $ make DESTDIR=/usr
+  $ make DESTDIR=/usr install
+
+AddressSanitizer (or ASan) is a GCC feature that detects memory corruption bugs
+such as buffer overflows and memory leaks.
+
+  $ cd tools/perf
+  $ make DEBUG=1 EXTRA_CFLAGS='-fno-omit-frame-pointer -fsanitize=address'
+  $ ASAN_OPTIONS=log_path=asan.log ./perf record -a
+
+ASan outputs all detected issues into a log file named 'asan.log.<pid>'.
+
+UndefinedBehaviorSanitizer (or UBSan) is a fast undefined-behavior detector
+supported by GCC. UBSan detects undefined behavior in programs at runtime.
+
+  $ cd tools/perf
+  $ make DEBUG=1 EXTRA_CFLAGS='-fno-omit-frame-pointer -fsanitize=undefined'
+  $ UBSAN_OPTIONS=print_stacktrace=1 ./perf record -a
+
+If UBSan detects any problem at runtime, it outputs a "runtime error:" message.
index 86f3dcc15f8375726f0ed90d333e79e3cfadb669..462b3cde067546dae83fdf7930621e6e67450974 100644 (file)
@@ -114,7 +114,7 @@ Given a $HOME/.perfconfig like this:
 
        [report]
                # Defaults
-               sort-order = comm,dso,symbol
+               sort_order = comm,dso,symbol
                percent-limit = 0
                queue-size = 0
                children = true
@@ -584,6 +584,20 @@ llvm.*::
        llvm.opts::
                Options passed to llc.
 
+samples.*::
+
+       samples.context::
+               Define how many ns worth of time to show
+               around samples in the perf report sample context browser.
+
+scripts.*::
+
+       Any option defines a script that is added to the scripts menu
+       in the interactive perf browser and whose output is displayed.
+       The name of the option is the menu entry name; the value is the
+       script command line.  The script is passed the same options as a
+       full perf script run, in particular the -i perf.data file, --cpu
+       and --tid options.
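+
+       For example, a hypothetical entry (the name and the command line
+       are purely illustrative) could look like:
+
+       [scripts]
+               mysamples = perf script -F time,comm,sym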
+
 SEE ALSO
 --------
 linkperf:perf[1]
index 8f0c2be34848f1a382722be4fa2ff25df7bdbace..58986f4cc190f60654d1dc30c373ae5a412aaa3b 100644 (file)
@@ -459,6 +459,25 @@ Set affinity mask of trace reading thread according to the policy defined by 'mo
   node - thread affinity mask is set to NUMA node cpu mask of the processed mmap buffer
   cpu  - thread affinity mask is set to cpu of the processed mmap buffer
 
+--mmap-flush=number::
+
+Specify the minimal number of bytes that are extracted from the mmap data
+pages and processed for output. The number can be specified using B/K/M/G
+suffixes.
+
+The maximal allowed value is a quarter of the size of the mmaped data pages.
+
+The default value is 1 byte, which means that every time the output writing
+thread finds some new data in the mmaped buffer, the data is extracted,
+possibly compressed (-z) and written to the output, perf.data or pipe.
+
+Larger data chunks are compressed more effectively than smaller chunks, so
+extraction of larger chunks from the mmap data pages is preferable from the
+perspective of output size reduction.
+
+Also, in some cases executing fewer output write syscalls with bigger data
+sizes can take less time than executing more output write syscalls with
+smaller data sizes, thus lowering runtime profiling overhead.
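+
+For example (an illustrative command line, not a recommended setting),
+the following records system-wide and extracts data from the mmap pages
+only once at least 64 KiB have accumulated:
+
+  perf record --mmap-flush=64K -a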
+
 --all-kernel::
 Configure all used events to run in kernel space.
 
@@ -495,6 +514,10 @@ overhead. You can still switch them on with:
 
   --switch-output --no-no-buildid  --no-no-buildid-cache
 
+--switch-max-files=N::
+
+When rotating perf.data with --switch-output, only keep N files.
+
 --dry-run::
 Parse options then exit. --dry-run can be used to detect errors in cmdline
 options.
index 1a27bfe05039f8284abea73947759fcccce9ecab..f441baa794ce826eff34b24d07796847c4a65127 100644 (file)
@@ -105,6 +105,8 @@ OPTIONS
        guest machine
        - sample: Number of sample
        - period: Raw number of event count of sample
+       - time: Separate the samples by time stamp with the resolution specified by
+       --time-quantum (default 100ms). Specify it together with overhead, and
+       place it before overhead (for example: --sort time,overhead,sym).
 
        By default, comm, dso and symbol keys are used.
        (i.e. --sort comm,dso,symbol)
@@ -459,6 +461,10 @@ include::itrace.txt[]
 --socket-filter::
        Only report the samples on the processor socket that match with this filter
 
+--samples=N::
+       Save N individual samples for each histogram entry to show context in perf
+       report tui browser.
+
 --raw-trace::
        When displaying traceevent output, do not use print fmt or plugins.
 
@@ -477,6 +483,9 @@ include::itrace.txt[]
        Please note that not all mmaps are stored, options affecting which ones
        are include 'perf record --data', for instance.
 
+--ns::
+       Show time stamps in nanoseconds.
+
 --stats::
        Display overall events statistics without any further processing.
        (like the one at the end of the perf report -D command)
@@ -494,6 +503,10 @@ include::itrace.txt[]
        The period/hits keywords set the base the percentage is computed
        on - the samples period or the number of samples (hits).
 
+--time-quantum::
+       Configure time quantum for time sort key. Default 100ms.
+       Accepts s, us, ms, ns units.
+
 include::callchain-overhead-calculation.txt[]
 
 SEE ALSO
index 2e19fd7ffe35135a94fb55f66ea367171cc3fb2d..9b0d04dd2a615827593c8b60d449a97c488cfe78 100644 (file)
@@ -380,6 +380,9 @@ include::itrace.txt[]
        Set the maximum number of program blocks to print with brstackasm for
        each sample.
 
+--reltime::
+       Print time stamps relative to trace start.
+
 --per-event-dump::
        Create per event files with a "perf.data.EVENT.dump" name instead of
         printing to stdout, useful, for instance, for generating flamegraphs.
index 4bc2085e5197a2ea59ecb18820a379ddc1034c85..39c05f89104e78dc6b577ee2d2a4a3b20729539e 100644 (file)
@@ -72,9 +72,8 @@ report::
 --all-cpus::
         system-wide collection from all CPUs (default if no target is specified)
 
--c::
---scale::
-       scale/normalize counter values
+--no-scale::
+       Don't scale/normalize counter values
 
 -d::
 --detailed::
index 849599f39c5e9c128fc21b888a07fa77108a46a1..869965d629ce0a1e7940a03288b905e70446a53c 100644 (file)
@@ -15,6 +15,7 @@ To see callchains in a more compact form: perf report -g folded
 Show individual samples with: perf script
 Limit to show entries above 5% only: perf report --percent-limit 5
 Profiling branch (mis)predictions with: perf record -b / perf report
+To show assembler sample contexts use perf record -b / perf script -F +brstackinsn --xed
 Treat branches as callchains: perf report --branch-history
 To count events in every 1000 msec: perf stat -I 1000
 Print event counts in CSV format with: perf stat -x,
@@ -34,3 +35,9 @@ Show current config key-value pairs: perf config --list
 Show user configuration overrides: perf config --user --list
 To add Node.js USDT(User-Level Statically Defined Tracing): perf buildid-cache --add `which node`
 To report cacheline events from previous recording: perf c2c report
+To browse sample contexts use perf report --sample 10 and select in context menu
+To separate samples by time use perf report --sort time,overhead,sym
+To set sample time separation other than 100ms with --sort time use --time-quantum
+Add -I to perf record to sample register values, which will be visible in perf report sample context.
+To show IPC for sampling periods use perf record -e '{cycles,instructions}:S' and then browse context
+To show context switches in perf report sample context add --switch-events to perf record.
index 0f11d5891301ad17cf7b2e3d572464e670abb03f..0c52a01dc759af86ab21f604c96a240bff3f6f5f 100644 (file)
@@ -152,6 +152,13 @@ endif
 FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS)
 FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf
 
+ifdef LIBZSTD_DIR
+  LIBZSTD_CFLAGS  := -I$(LIBZSTD_DIR)/lib
+  LIBZSTD_LDFLAGS := -L$(LIBZSTD_DIR)/lib
+endif
+FEATURE_CHECK_CFLAGS-libzstd := $(LIBZSTD_CFLAGS)
+FEATURE_CHECK_LDFLAGS-libzstd := $(LIBZSTD_LDFLAGS)
+
 FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi
 # include ARCH specific config
 -include $(src-perf)/arch/$(SRCARCH)/Makefile
@@ -227,6 +234,8 @@ FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
 
 FEATURE_CHECK_LDFLAGS-libaio = -lrt
 
+FEATURE_CHECK_LDFLAGS-disassembler-four-args = -lbfd -lopcodes -ldl
+
 CFLAGS += -fno-omit-frame-pointer
 CFLAGS += -ggdb3
 CFLAGS += -funwind-tables
@@ -713,7 +722,7 @@ else
 endif
 
 ifeq ($(feature-libbfd), 1)
-  EXTLIBS += -lbfd
+  EXTLIBS += -lbfd -lopcodes
 else
   # we are on a system that requires -liberty and (maybe) -lz
   # to link against -lbfd; test each case individually here
@@ -724,12 +733,15 @@ else
   $(call feature_check,libbfd-liberty-z)
 
   ifeq ($(feature-libbfd-liberty), 1)
-    EXTLIBS += -lbfd -liberty
+    EXTLIBS += -lbfd -lopcodes -liberty
+    FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -ldl
   else
     ifeq ($(feature-libbfd-liberty-z), 1)
-      EXTLIBS += -lbfd -liberty -lz
+      EXTLIBS += -lbfd -lopcodes -liberty -lz
+      FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -lz -ldl
     endif
   endif
+  $(call feature_check,disassembler-four-args)
 endif
 
 ifdef NO_DEMANGLE
@@ -782,6 +794,19 @@ ifndef NO_LZMA
   endif
 endif
 
+ifndef NO_LIBZSTD
+  ifeq ($(feature-libzstd), 1)
+    CFLAGS += -DHAVE_ZSTD_SUPPORT
+    CFLAGS += $(LIBZSTD_CFLAGS)
+    LDFLAGS += $(LIBZSTD_LDFLAGS)
+    EXTLIBS += -lzstd
+    $(call detected,CONFIG_ZSTD)
+  else
+    msg := $(warning No libzstd found, disabling trace compression, please install libzstd-dev[el] and/or set LIBZSTD_DIR);
+    NO_LIBZSTD := 1
+  endif
+endif
+
 ifndef NO_BACKTRACE
   ifeq ($(feature-backtrace), 1)
     CFLAGS += -DHAVE_BACKTRACE_SUPPORT
@@ -808,6 +833,10 @@ ifdef HAVE_KVM_STAT_SUPPORT
     CFLAGS += -DHAVE_KVM_STAT_SUPPORT
 endif
 
+ifeq ($(feature-disassembler-four-args), 1)
+    CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
+endif
+
 ifeq (${IS_64_BIT}, 1)
   ifndef NO_PERF_READ_VDSO32
     $(call feature_check,compile-32)
index 01f7555fd93369a60d8abe6bee7198eb1cbd3e12..c706548d5b105088f19e8b319fed0ea592aa291b 100644 (file)
@@ -108,6 +108,9 @@ include ../scripts/utilities.mak
 # streaming for record mode. Currently Posix AIO trace streaming is
 # supported only when linking with glibc.
 #
+# Define NO_LIBZSTD if you do not want support for Zstandard-based runtime
+# trace compression in record mode.
+#
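+# Usage sketch (paths hypothetical):
+#
+#   make NO_LIBZSTD=1            # build without Zstandard support
+#   make LIBZSTD_DIR=/opt/zstd   # point at a non-system libzstd
+#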
 
 # As per kernel Makefile, avoid funny character set dependencies
 unexport LC_ALL
@@ -481,8 +484,8 @@ $(madvise_behavior_array): $(madvise_hdr_dir)/mman-common.h $(madvise_behavior_t
 mmap_flags_array := $(beauty_outdir)/mmap_flags_array.c
 mmap_flags_tbl := $(srctree)/tools/perf/trace/beauty/mmap_flags.sh
 
-$(mmap_flags_array): $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(mmap_flags_tbl)
-       $(Q)$(SHELL) '$(mmap_flags_tbl)' $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@
+$(mmap_flags_array): $(linux_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(mmap_flags_tbl)
+       $(Q)$(SHELL) '$(mmap_flags_tbl)' $(linux_uapi_dir) $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@
 
 mount_flags_array := $(beauty_outdir)/mount_flags_array.c
 mount_flags_tbl := $(srctree)/tools/perf/trace/beauty/mount_flags.sh
index f0b1709a5ffb2b0901d7f2492252876d17bc25a0..92ee0b4378d4c23b9ac3d22d92d4637efb187f94 100644 (file)
 332    common  statx                   __x64_sys_statx
 333    common  io_pgetevents           __x64_sys_io_pgetevents
 334    common  rseq                    __x64_sys_rseq
+# don't use numbers 387 through 423, add new calls after the last
+# 'common' entry
+424    common  pidfd_send_signal       __x64_sys_pidfd_send_signal
+425    common  io_uring_setup          __x64_sys_io_uring_setup
+426    common  io_uring_enter          __x64_sys_io_uring_enter
+427    common  io_uring_register       __x64_sys_io_uring_register
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
 520    x32     execve                  __x32_compat_sys_execve/ptregs
 521    x32     ptrace                  __x32_compat_sys_ptrace
 522    x32     rt_sigpending           __x32_compat_sys_rt_sigpending
-523    x32     rt_sigtimedwait         __x32_compat_sys_rt_sigtimedwait
+523    x32     rt_sigtimedwait         __x32_compat_sys_rt_sigtimedwait_time64
 524    x32     rt_sigqueueinfo         __x32_compat_sys_rt_sigqueueinfo
 525    x32     sigaltstack             __x32_compat_sys_sigaltstack
 526    x32     timer_create            __x32_compat_sys_timer_create
 534    x32     preadv                  __x32_compat_sys_preadv64
 535    x32     pwritev                 __x32_compat_sys_pwritev64
 536    x32     rt_tgsigqueueinfo       __x32_compat_sys_rt_tgsigqueueinfo
-537    x32     recvmmsg                __x32_compat_sys_recvmmsg
+537    x32     recvmmsg                __x32_compat_sys_recvmmsg_time64
 538    x32     sendmmsg                __x32_compat_sys_sendmmsg
 539    x32     process_vm_readv        __x32_compat_sys_process_vm_readv
 540    x32     process_vm_writev       __x32_compat_sys_process_vm_writev
index 7aab0be5fc5ffb8fc09081b2617cbeccae9dfc42..47f9c56e744f8c3751e6cf22ec9f738182ea8370 100644 (file)
@@ -14,5 +14,6 @@ perf-$(CONFIG_LOCAL_LIBUNWIND)    += unwind-libunwind.o
 perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
 
 perf-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-$(CONFIG_AUXTRACE) += archinsn.o
 perf-$(CONFIG_AUXTRACE) += intel-pt.o
 perf-$(CONFIG_AUXTRACE) += intel-bts.o
diff --git a/tools/perf/arch/x86/util/archinsn.c b/tools/perf/arch/x86/util/archinsn.c
new file mode 100644 (file)
index 0000000..4237bb2
--- /dev/null
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "perf.h"
+#include "archinsn.h"
+#include "util/intel-pt-decoder/insn.h"
+#include "machine.h"
+#include "thread.h"
+#include "symbol.h"
+
+void arch_fetch_insn(struct perf_sample *sample,
+                    struct thread *thread,
+                    struct machine *machine)
+{
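+       /*
+        * Copy the instruction bytes at the sampled IP from the target
+        * thread and decode just the length, so insn/insn_len can be
+        * printed for samples that did not include them.
+        */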
+       struct insn insn;
+       int len;
+       bool is64bit = false;
+
+       if (!sample->ip)
+               return;
+       len = thread__memcpy(thread, machine, sample->insn, sample->ip, sizeof(sample->insn), &is64bit);
+       if (len <= 0)
+               return;
+       insn_init(&insn, sample->insn, len, is64bit);
+       insn_get_length(&insn);
+       if (insn_complete(&insn) && insn.length <= len)
+               sample->insn_len = insn.length;
+}
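
With this helper in place (plus the weak fallback added to builtin-script.c
below), instruction bytes can be printed for ordinary samples recorded on the
same machine - a usage sketch:

	perf record -a sleep 1
	perf script -F +insnlen,+insn | head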
index 0c0a6e824934149997c64190fff22a0f8338f6d1..2af067859966599cf2df1b96ab30100311c61a05 100644 (file)
@@ -224,7 +224,7 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu)
        pthread_attr_t thread_attr, *attrp = NULL;
        cpu_set_t cpuset;
        unsigned int i, j;
-       int ret;
+       int ret = 0;
 
        if (!noaffinity)
                pthread_attr_init(&thread_attr);
index 5a11534e96a0c52cf15fcda34b55e8126010b479..fe85448abd454b373828ff2d0a6824356e9fff86 100644 (file)
@@ -293,7 +293,7 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu)
        pthread_attr_t thread_attr, *attrp = NULL;
        cpu_set_t cpuset;
        unsigned int i, j;
-       int ret, events = EPOLLIN;
+       int ret = 0, events = EPOLLIN;
 
        if (oneshot)
                events |= EPOLLONESHOT;
index 98ad783efc69dd1c3d3cae65704ce7bd0181fe37..a7784554a80deba55f91753598f8a10796f8d6f4 100644 (file)
 #include <numa.h>
 #include <numaif.h>
 
+#ifndef RUSAGE_THREAD
+# define RUSAGE_THREAD 1
+#endif
+
 /*
  * Regular printout to the terminal, suppressed if -q is specified:
  */
index fa520f4b8095ae6eac3922ac73781605041772be..b80eee45511113cd094bbe3f75bd2bdab001e0bc 100644 (file)
@@ -1975,7 +1975,7 @@ int cmd_kmem(int argc, const char **argv)
                        goto out_delete;
                }
 
-               kmem_page_size = tep_get_page_size(evsel->tp_format->pevent);
+               kmem_page_size = tep_get_page_size(evsel->tp_format->tep);
                symbol_conf.use_callchain = true;
        }
 
index c9f98d00c0e998292d334c92bcfa4186a74dd6bc..e0312a1c4792ae49d08d7cb0d33d28e2f9d44f15 100644 (file)
@@ -70,10 +70,11 @@ int cmd_list(int argc, const char **argv)
                        print_symbol_events(NULL, PERF_TYPE_HARDWARE,
                                        event_symbols_hw, PERF_COUNT_HW_MAX, raw_dump);
                else if (strcmp(argv[i], "sw") == 0 ||
-                        strcmp(argv[i], "software") == 0)
+                        strcmp(argv[i], "software") == 0) {
                        print_symbol_events(NULL, PERF_TYPE_SOFTWARE,
                                        event_symbols_sw, PERF_COUNT_SW_MAX, raw_dump);
-               else if (strcmp(argv[i], "cache") == 0 ||
+                       print_tool_events(NULL, raw_dump);
+               } else if (strcmp(argv[i], "cache") == 0 ||
                         strcmp(argv[i], "hwcache") == 0)
                        print_hwcache_events(NULL, raw_dump);
                else if (strcmp(argv[i], "pmu") == 0)
@@ -113,13 +114,14 @@ int cmd_list(int argc, const char **argv)
                                            event_symbols_hw, PERF_COUNT_HW_MAX, raw_dump);
                        print_symbol_events(s, PERF_TYPE_SOFTWARE,
                                            event_symbols_sw, PERF_COUNT_SW_MAX, raw_dump);
+                       print_tool_events(s, raw_dump);
                        print_hwcache_events(s, raw_dump);
                        print_pmu_events(s, raw_dump, !desc_flag,
                                                long_desc_flag,
                                                details_flag);
                        print_tracepoint_events(NULL, s, raw_dump);
                        print_sdt_events(NULL, s, raw_dump);
-                       metricgroup__print(true, true, NULL, raw_dump, details_flag);
+                       metricgroup__print(true, true, s, raw_dump, details_flag);
                        free(s);
                }
        }
index f3f7f310033663f5b61752cfb6c835b280f8436e..c5e10552776a93f92d7eb4f7d6901091d5f0d538 100644 (file)
@@ -62,6 +62,9 @@ struct switch_output {
        unsigned long    time;
        const char      *str;
        bool             set;
+       char             **filenames;
+       int              num_files;
+       int              cur_file;
 };
 
 struct record {
@@ -334,6 +337,41 @@ static int record__aio_enabled(struct record *rec)
        return rec->opts.nr_cblocks > 0;
 }
 
+#define MMAP_FLUSH_DEFAULT 1
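+/*
+ * Parse --mmap-flush: a plain byte count or a B/K/M/G suffixed size
+ * (e.g. "--mmap-flush 4K"), forced to at least 1 byte and clamped to a
+ * quarter of the mmap buffer.
+ */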
+static int record__mmap_flush_parse(const struct option *opt,
+                                   const char *str,
+                                   int unset)
+{
+       int flush_max;
+       struct record_opts *opts = (struct record_opts *)opt->value;
+       static struct parse_tag tags[] = {
+                       { .tag  = 'B', .mult = 1       },
+                       { .tag  = 'K', .mult = 1 << 10 },
+                       { .tag  = 'M', .mult = 1 << 20 },
+                       { .tag  = 'G', .mult = 1 << 30 },
+                       { .tag  = 0 },
+       };
+
+       if (unset)
+               return 0;
+
+       if (str) {
+               opts->mmap_flush = parse_tag_value(str, tags);
+               if (opts->mmap_flush == (int)-1)
+                       opts->mmap_flush = strtol(str, NULL, 0);
+       }
+
+       if (!opts->mmap_flush)
+               opts->mmap_flush = MMAP_FLUSH_DEFAULT;
+
+       flush_max = perf_evlist__mmap_size(opts->mmap_pages);
+       flush_max /= 4;
+       if (opts->mmap_flush > flush_max)
+               opts->mmap_flush = flush_max;
+
+       return 0;
+}
+
 static int process_synthesized_event(struct perf_tool *tool,
                                     union perf_event *event,
                                     struct perf_sample *sample __maybe_unused,
@@ -392,7 +430,7 @@ static int record__process_auxtrace(struct perf_tool *tool,
        size_t padding;
        u8 pad[8] = {0};
 
-       if (!perf_data__is_pipe(data)) {
+       if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
                off_t file_offset;
                int fd = perf_data__fd(data);
                int err;
@@ -543,7 +581,8 @@ static int record__mmap_evlist(struct record *rec,
        if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
                                 opts->auxtrace_mmap_pages,
                                 opts->auxtrace_snapshot_mode,
-                                opts->nr_cblocks, opts->affinity) < 0) {
+                                opts->nr_cblocks, opts->affinity,
+                                opts->mmap_flush) < 0) {
                if (errno == EPERM) {
                        pr_err("Permission error mapping pages.\n"
                               "Consider increasing "
@@ -733,7 +772,7 @@ static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
 }
 
 static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
-                                   bool overwrite)
+                                   bool overwrite, bool synch)
 {
        u64 bytes_written = rec->bytes_written;
        int i;
@@ -756,12 +795,19 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
                off = record__aio_get_pos(trace_fd);
 
        for (i = 0; i < evlist->nr_mmaps; i++) {
+               u64 flush = 0;
                struct perf_mmap *map = &maps[i];
 
                if (map->base) {
                        record__adjust_affinity(rec, map);
+                       if (synch) {
+                               flush = map->flush;
+                               map->flush = 1;
+                       }
                        if (!record__aio_enabled(rec)) {
                                if (perf_mmap__push(map, rec, record__pushfn) != 0) {
+                                       if (synch)
+                                               map->flush = flush;
                                        rc = -1;
                                        goto out;
                                }
@@ -774,10 +820,14 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
                                idx = record__aio_sync(map, false);
                                if (perf_mmap__aio_push(map, rec, idx, record__aio_pushfn, &off) != 0) {
                                        record__aio_set_pos(trace_fd, off);
+                                       if (synch)
+                                               map->flush = flush;
                                        rc = -1;
                                        goto out;
                                }
                        }
+                       if (synch)
+                               map->flush = flush;
                }
 
                if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
@@ -803,15 +853,15 @@ out:
        return rc;
 }
 
-static int record__mmap_read_all(struct record *rec)
+static int record__mmap_read_all(struct record *rec, bool synch)
 {
        int err;
 
-       err = record__mmap_read_evlist(rec, rec->evlist, false);
+       err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
        if (err)
                return err;
 
-       return record__mmap_read_evlist(rec, rec->evlist, true);
+       return record__mmap_read_evlist(rec, rec->evlist, true, synch);
 }
 
 static void record__init_features(struct record *rec)
@@ -837,6 +887,8 @@ static void record__init_features(struct record *rec)
        if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
                perf_header__clear_feat(&session->header, HEADER_CLOCKID);
 
+       perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
+
        perf_header__clear_feat(&session->header, HEADER_STAT);
 }
 
@@ -890,6 +942,7 @@ record__switch_output(struct record *rec, bool at_exit)
 {
        struct perf_data *data = &rec->data;
        int fd, err;
+       char *new_filename;
 
        /* Same Size:      "2015122520103046"*/
        char timestamp[] = "InvalidTimestamp";
@@ -910,7 +963,7 @@ record__switch_output(struct record *rec, bool at_exit)
 
        fd = perf_data__switch(data, timestamp,
                                    rec->session->header.data_offset,
-                                   at_exit);
+                                   at_exit, &new_filename);
        if (fd >= 0 && !at_exit) {
                rec->bytes_written = 0;
                rec->session->header.data_size = 0;
@@ -920,6 +973,21 @@ record__switch_output(struct record *rec, bool at_exit)
                fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
                        data->path, timestamp);
 
+       if (rec->switch_output.num_files) {
+               int n = rec->switch_output.cur_file + 1;
+
+               if (n >= rec->switch_output.num_files)
+                       n = 0;
+               rec->switch_output.cur_file = n;
+               if (rec->switch_output.filenames[n]) {
+                       remove(rec->switch_output.filenames[n]);
+                       free(rec->switch_output.filenames[n]);
+               }
+               rec->switch_output.filenames[n] = new_filename;
+       } else {
+               free(new_filename);
+       }
+
        /* Output tracking events */
        if (!at_exit) {
                record__synthesize(rec, false);
@@ -1093,7 +1161,7 @@ static int record__synthesize(struct record *rec, bool tail)
                return err;
        }
 
-       err = perf_event__synthesize_bpf_events(tool, process_synthesized_event,
+       err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
                                                machine, opts);
        if (err < 0)
                pr_warning("Couldn't synthesize bpf events.\n");
@@ -1116,6 +1184,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
        struct perf_data *data = &rec->data;
        struct perf_session *session;
        bool disabled = false, draining = false;
+       struct perf_evlist *sb_evlist = NULL;
        int fd;
 
        atexit(record__sig_exit);
@@ -1216,6 +1285,14 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                goto out_child;
        }
 
+       if (!opts->no_bpf_event)
+               bpf_event__add_sb_event(&sb_evlist, &session->header.env);
+
+       if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
+               pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
+               opts->no_bpf_event = true;
+       }
+
        err = record__synthesize(rec, false);
        if (err < 0)
                goto out_child;
@@ -1310,7 +1387,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                if (trigger_is_hit(&switch_output_trigger) || done || draining)
                        perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
 
-               if (record__mmap_read_all(rec) < 0) {
+               if (record__mmap_read_all(rec, false) < 0) {
                        trigger_error(&auxtrace_snapshot_trigger);
                        trigger_error(&switch_output_trigger);
                        err = -1;
@@ -1411,6 +1488,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                record__synthesize_workload(rec, true);
 
 out_child:
+       record__mmap_read_all(rec, true);
        record__aio_mmap_read_sync(rec);
 
        if (forks) {
@@ -1466,6 +1544,9 @@ out_child:
 
 out_delete_session:
        perf_session__delete(session);
+
+       if (!opts->no_bpf_event)
+               perf_evlist__stop_sb_thread(sb_evlist);
        return status;
 }
 
@@ -1813,6 +1894,7 @@ static struct record record = {
                        .uses_mmap   = true,
                        .default_per_cpu = true,
                },
+               .mmap_flush          = MMAP_FLUSH_DEFAULT,
        },
        .tool = {
                .sample         = process_sample_event,
@@ -1870,7 +1952,7 @@ static struct option __record_options[] = {
        OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
                    "synthesize non-sample events at the end of output"),
        OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
-       OPT_BOOLEAN(0, "bpf-event", &record.opts.bpf_event, "record bpf events"),
+       OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"),
        OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
                    "Fail if the specified frequency can't be used"),
        OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
@@ -1879,6 +1961,9 @@ static struct option __record_options[] = {
        OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
                     "number of mmap data pages and AUX area tracing mmap pages",
                     record__parse_mmap_pages),
+       OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
+                    "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
+                    record__mmap_flush_parse),
        OPT_BOOLEAN(0, "group", &record.opts.group,
                    "put the counters into a counter group"),
        OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
@@ -1968,9 +2053,11 @@ static struct option __record_options[] = {
        OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
                    "Record timestamp boundary (time of first/last samples)"),
        OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
-                         &record.switch_output.set, "signal,size,time",
-                         "Switch output when receive SIGUSR2 or cross size,time threshold",
+                         &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
+                         "Switch output when receiving SIGUSR2 (signal) or when crossing a size or time threshold",
                          "signal"),
+       OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
+                  "Limit number of switch output generated files"),
        OPT_BOOLEAN(0, "dry-run", &dry_run,
                    "Parse options then exit"),
 #ifdef HAVE_AIO_SUPPORT
@@ -2057,6 +2144,13 @@ int cmd_record(int argc, const char **argv)
                alarm(rec->switch_output.time);
        }
 
+       if (rec->switch_output.num_files) {
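+               /*
+                * Usage sketch: "perf record --switch-output=1G
+                * --switch-max-files=4" keeps only the four most recent 1G
+                * dumps, deleting the oldest as new files rotate in.
+                */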
+               rec->switch_output.filenames = calloc(rec->switch_output.num_files,
+                                                     sizeof(char *));
+               if (!rec->switch_output.filenames)
+                       return -EINVAL;
+       }
+
        /*
         * Allow aliases to facilitate the lookup of symbols for address
         * filters. Refer to auxtrace_parse_filters().
@@ -2182,6 +2276,7 @@ int cmd_record(int argc, const char **argv)
                pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);
 
        pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
+       pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
 
        err = __cmd_record(&record, argc, argv);
 out:
index ee93c18a6685c1c3ed3bcb8a5d5c71e1720ad1e8..4054eb1f98ac19d956cf680dfb84dcf13d509db5 100644 (file)
 #include <errno.h>
 #include <inttypes.h>
 #include <regex.h>
+#include "sane_ctype.h"
 #include <signal.h>
 #include <linux/bitmap.h>
 #include <linux/stringify.h>
+#include <linux/time64.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
@@ -926,6 +928,43 @@ report_parse_callchain_opt(const struct option *opt, const char *arg, int unset)
        return parse_callchain_report_opt(arg);
 }
 
+static int
+parse_time_quantum(const struct option *opt, const char *arg,
+                  int unset __maybe_unused)
+{
+       unsigned long *time_q = opt->value;
+       char *end;
+
+       *time_q = strtoul(arg, &end, 0);
+       if (end == arg)
+               goto parse_err;
+       if (*time_q == 0) {
+               pr_err("time quantum cannot be 0");
+               return -1;
+       }
+       while (isspace(*end))
+               end++;
+       if (*end == 0)
+               return 0;
+       if (!strcmp(end, "s")) {
+               *time_q *= NSEC_PER_SEC;
+               return 0;
+       }
+       if (!strcmp(end, "ms")) {
+               *time_q *= NSEC_PER_MSEC;
+               return 0;
+       }
+       if (!strcmp(end, "us")) {
+               *time_q *= NSEC_PER_USEC;
+               return 0;
+       }
+       if (!strcmp(end, "ns"))
+               return 0;
+parse_err:
+       pr_err("Cannot parse time quantum `%s'\n", arg);
+       return -1;
+}
+
 int
 report_parse_ignore_callees_opt(const struct option *opt __maybe_unused,
                                const char *arg, int unset __maybe_unused)
@@ -1044,10 +1083,9 @@ int cmd_report(int argc, const char **argv)
        OPT_BOOLEAN(0, "header-only", &report.header_only,
                    "Show only data header."),
        OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
-                  "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
-                  " Please refer the man page for the complete list."),
+                  sort_help("sort by key(s):")),
        OPT_STRING('F', "fields", &field_order, "key[,keys...]",
-                  "output field(s): overhead, period, sample plus all of sort keys"),
+                  sort_help("output field(s): overhead period sample ")),
        OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
                    "Show sample percentage for different cpu modes"),
        OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
@@ -1120,6 +1158,8 @@ int cmd_report(int argc, const char **argv)
        OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
                    "Enable kernel symbol demangling"),
        OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
+       OPT_INTEGER(0, "samples", &symbol_conf.res_sample,
+                   "Number of samples to save per histogram entry for individual browsing"),
        OPT_CALLBACK(0, "percent-limit", &report, "percent",
                     "Don't show entries under that percent", parse_percent_limit),
        OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
@@ -1147,6 +1187,10 @@ int cmd_report(int argc, const char **argv)
        OPT_CALLBACK(0, "percent-type", &report.annotation_opts, "local-period",
                     "Set percent type local/global-period/hits",
                     annotate_parse_percent_type),
+       OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, "Show times in nanosecs"),
+       OPT_CALLBACK(0, "time-quantum", &symbol_conf.time_quantum, "time (ms|us|ns|s)",
+                    "Set time quantum for time sort key (default 100ms)",
+                    parse_time_quantum),
        OPT_END()
        };
        struct perf_data data = {
index 53f78cf3113f9ed5fe4a9d9de42fdf2a3a0b9d6e..61cfd8f70989235de6112e114925d08831b505dc 100644 (file)
 #include "util/time-utils.h"
 #include "util/path.h"
 #include "print_binary.h"
+#include "archinsn.h"
 #include <linux/bitmap.h>
 #include <linux/kernel.h>
 #include <linux/stringify.h>
 #include <linux/time64.h>
+#include <sys/utsname.h>
 #include "asm/bug.h"
 #include "util/mem-events.h"
 #include "util/dump-insn.h"
@@ -51,6 +53,8 @@
 
 static char const              *script_name;
 static char const              *generate_script_lang;
+static bool                    reltime;
+static u64                     initial_time;
 static bool                    debug_mode;
 static u64                     last_timestamp;
 static u64                     nr_unordered;
@@ -58,11 +62,11 @@ static bool                 no_callchain;
 static bool                    latency_format;
 static bool                    system_wide;
 static bool                    print_flags;
-static bool                    nanosecs;
 static const char              *cpu_list;
 static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 static struct perf_stat_config stat_config;
 static int                     max_blocks;
+static bool                    native_arch;
 
 unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
 
@@ -684,15 +688,21 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
        }
 
        if (PRINT_FIELD(TIME)) {
-               nsecs = sample->time;
+               u64 t = sample->time;
+               if (reltime) {
+                       if (!initial_time)
+                               initial_time = sample->time;
+                       t = sample->time - initial_time;
+               }
+               nsecs = t;
                secs = nsecs / NSEC_PER_SEC;
                nsecs -= secs * NSEC_PER_SEC;
 
-               if (nanosecs)
+               if (symbol_conf.nanosecs)
                        printed += fprintf(fp, "%5lu.%09llu: ", secs, nsecs);
                else {
                        char sample_time[32];
-                       timestamp__scnprintf_usec(sample->time, sample_time, sizeof(sample_time));
+                       timestamp__scnprintf_usec(t, sample_time, sizeof(sample_time));
                        printed += fprintf(fp, "%12s: ", sample_time);
                }
        }
@@ -1227,6 +1237,12 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample,
        return len + dlen;
 }
 
+__weak void arch_fetch_insn(struct perf_sample *sample __maybe_unused,
+                           struct thread *thread __maybe_unused,
+                           struct machine *machine __maybe_unused)
+{
+}
+
 static int perf_sample__fprintf_insn(struct perf_sample *sample,
                                     struct perf_event_attr *attr,
                                     struct thread *thread,
@@ -1234,9 +1250,12 @@ static int perf_sample__fprintf_insn(struct perf_sample *sample,
 {
        int printed = 0;
 
+       if (sample->insn_len == 0 && native_arch)
+               arch_fetch_insn(sample, thread, machine);
+
        if (PRINT_FIELD(INSNLEN))
                printed += fprintf(fp, " ilen: %d", sample->insn_len);
-       if (PRINT_FIELD(INSN)) {
+       if (PRINT_FIELD(INSN) && sample->insn_len) {
                int i;
 
                printed += fprintf(fp, " insn:");
@@ -1922,6 +1941,13 @@ static int cleanup_scripting(void)
        return scripting_ops ? scripting_ops->stop_script() : 0;
 }
 
+static bool filter_cpu(struct perf_sample *sample)
+{
+       if (cpu_list)
+               return !test_bit(sample->cpu, cpu_bitmap);
+       return false;
+}
+
 static int process_sample_event(struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
@@ -1956,7 +1982,7 @@ static int process_sample_event(struct perf_tool *tool,
        if (al.filtered)
                goto out_put;
 
-       if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
+       if (filter_cpu(sample))
                goto out_put;
 
        if (scripting_ops)
@@ -2041,9 +2067,11 @@ static int process_comm_event(struct perf_tool *tool,
                sample->tid = event->comm.tid;
                sample->pid = event->comm.pid;
        }
-       perf_sample__fprintf_start(sample, thread, evsel,
+       if (!filter_cpu(sample)) {
+               perf_sample__fprintf_start(sample, thread, evsel,
                                   PERF_RECORD_COMM, stdout);
-       perf_event__fprintf(event, stdout);
+               perf_event__fprintf(event, stdout);
+       }
        ret = 0;
 out:
        thread__put(thread);
@@ -2077,9 +2105,11 @@ static int process_namespaces_event(struct perf_tool *tool,
                sample->tid = event->namespaces.tid;
                sample->pid = event->namespaces.pid;
        }
-       perf_sample__fprintf_start(sample, thread, evsel,
-                                  PERF_RECORD_NAMESPACES, stdout);
-       perf_event__fprintf(event, stdout);
+       if (!filter_cpu(sample)) {
+               perf_sample__fprintf_start(sample, thread, evsel,
+                                          PERF_RECORD_NAMESPACES, stdout);
+               perf_event__fprintf(event, stdout);
+       }
        ret = 0;
 out:
        thread__put(thread);
@@ -2111,9 +2141,11 @@ static int process_fork_event(struct perf_tool *tool,
                sample->tid = event->fork.tid;
                sample->pid = event->fork.pid;
        }
-       perf_sample__fprintf_start(sample, thread, evsel,
-                                  PERF_RECORD_FORK, stdout);
-       perf_event__fprintf(event, stdout);
+       if (!filter_cpu(sample)) {
+               perf_sample__fprintf_start(sample, thread, evsel,
+                                          PERF_RECORD_FORK, stdout);
+               perf_event__fprintf(event, stdout);
+       }
        thread__put(thread);
 
        return 0;
@@ -2141,9 +2173,11 @@ static int process_exit_event(struct perf_tool *tool,
                sample->tid = event->fork.tid;
                sample->pid = event->fork.pid;
        }
-       perf_sample__fprintf_start(sample, thread, evsel,
-                                  PERF_RECORD_EXIT, stdout);
-       perf_event__fprintf(event, stdout);
+       if (!filter_cpu(sample)) {
+               perf_sample__fprintf_start(sample, thread, evsel,
+                                          PERF_RECORD_EXIT, stdout);
+               perf_event__fprintf(event, stdout);
+       }
 
        if (perf_event__process_exit(tool, event, sample, machine) < 0)
                err = -1;
@@ -2177,9 +2211,11 @@ static int process_mmap_event(struct perf_tool *tool,
                sample->tid = event->mmap.tid;
                sample->pid = event->mmap.pid;
        }
-       perf_sample__fprintf_start(sample, thread, evsel,
-                                  PERF_RECORD_MMAP, stdout);
-       perf_event__fprintf(event, stdout);
+       if (!filter_cpu(sample)) {
+               perf_sample__fprintf_start(sample, thread, evsel,
+                                          PERF_RECORD_MMAP, stdout);
+               perf_event__fprintf(event, stdout);
+       }
        thread__put(thread);
        return 0;
 }
@@ -2209,9 +2245,11 @@ static int process_mmap2_event(struct perf_tool *tool,
                sample->tid = event->mmap2.tid;
                sample->pid = event->mmap2.pid;
        }
-       perf_sample__fprintf_start(sample, thread, evsel,
-                                  PERF_RECORD_MMAP2, stdout);
-       perf_event__fprintf(event, stdout);
+       if (!filter_cpu(sample)) {
+               perf_sample__fprintf_start(sample, thread, evsel,
+                                          PERF_RECORD_MMAP2, stdout);
+               perf_event__fprintf(event, stdout);
+       }
        thread__put(thread);
        return 0;
 }
@@ -2236,9 +2274,11 @@ static int process_switch_event(struct perf_tool *tool,
                return -1;
        }
 
-       perf_sample__fprintf_start(sample, thread, evsel,
-                                  PERF_RECORD_SWITCH, stdout);
-       perf_event__fprintf(event, stdout);
+       if (!filter_cpu(sample)) {
+               perf_sample__fprintf_start(sample, thread, evsel,
+                                          PERF_RECORD_SWITCH, stdout);
+               perf_event__fprintf(event, stdout);
+       }
        thread__put(thread);
        return 0;
 }
@@ -2259,9 +2299,11 @@ process_lost_event(struct perf_tool *tool,
        if (thread == NULL)
                return -1;
 
-       perf_sample__fprintf_start(sample, thread, evsel,
-                                  PERF_RECORD_LOST, stdout);
-       perf_event__fprintf(event, stdout);
+       if (!filter_cpu(sample)) {
+               perf_sample__fprintf_start(sample, thread, evsel,
+                                          PERF_RECORD_LOST, stdout);
+               perf_event__fprintf(event, stdout);
+       }
        thread__put(thread);
        return 0;
 }
@@ -2948,7 +2990,8 @@ static int check_ev_match(char *dir_name, char *scriptname,
  * will list all statically runnable scripts, select one, execute it and
  * show the output in a perf browser.
  */
-int find_scripts(char **scripts_array, char **scripts_path_array)
+int find_scripts(char **scripts_array, char **scripts_path_array, int num,
+                int pathlen)
 {
        struct dirent *script_dirent, *lang_dirent;
        char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN];
@@ -2993,7 +3036,10 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
                        /* Skip those real time scripts: xxxtop.p[yl] */
                        if (strstr(script_dirent->d_name, "top."))
                                continue;
-                       sprintf(scripts_path_array[i], "%s/%s", lang_path,
+                       if (i >= num)
+                               break;
+                       snprintf(scripts_path_array[i], pathlen, "%s/%s",
+                               lang_path,
                                script_dirent->d_name);
                        temp = strchr(script_dirent->d_name, '.');
                        snprintf(scripts_array[i],
@@ -3232,7 +3278,7 @@ static int parse_insn_trace(const struct option *opt __maybe_unused,
 {
        parse_output_fields(NULL, "+insn,-event,-period", 0);
        itrace_parse_synth_opts(opt, "i0ns", 0);
-       nanosecs = true;
+       symbol_conf.nanosecs = true;
        return 0;
 }
 
@@ -3250,7 +3296,7 @@ static int parse_call_trace(const struct option *opt __maybe_unused,
 {
        parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent", 0);
        itrace_parse_synth_opts(opt, "cewp", 0);
-       nanosecs = true;
+       symbol_conf.nanosecs = true;
        return 0;
 }
 
@@ -3260,7 +3306,7 @@ static int parse_callret_trace(const struct option *opt __maybe_unused,
 {
        parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent,+flags", 0);
        itrace_parse_synth_opts(opt, "crewp", 0);
-       nanosecs = true;
+       symbol_conf.nanosecs = true;
        return 0;
 }
 
@@ -3277,6 +3323,7 @@ int cmd_script(int argc, const char **argv)
                .set = false,
                .default_no_sample = true,
        };
+       struct utsname uts;
        char *script_path = NULL;
        const char **__argv;
        int i, j, err = 0;
@@ -3374,6 +3421,7 @@ int cmd_script(int argc, const char **argv)
                     "Set the maximum stack depth when parsing the callchain, "
                     "anything beyond the specified depth will be ignored. "
                     "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
+       OPT_BOOLEAN(0, "reltime", &reltime, "Show time stamps relative to start"),
        OPT_BOOLEAN('I', "show-info", &show_full_info,
                    "display extended information from perf.data file"),
        OPT_BOOLEAN('\0', "show-kernel-path", &symbol_conf.show_kernel_path,
@@ -3395,7 +3443,7 @@ int cmd_script(int argc, const char **argv)
        OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
        OPT_INTEGER(0, "max-blocks", &max_blocks,
                    "Maximum number of code blocks to dump with brstackinsn"),
-       OPT_BOOLEAN(0, "ns", &nanosecs,
+       OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs,
                    "Use 9 decimal places when displaying time"),
        OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
                            "Instruction Tracing options\n" ITRACE_HELP,
@@ -3448,6 +3496,11 @@ int cmd_script(int argc, const char **argv)
                }
        }
 
+       if (script.time_str && reltime) {
+               fprintf(stderr, "Don't combine --reltime with --time\n");
+               return -1;
+       }
+
        if (itrace_synth_opts.callchain &&
            itrace_synth_opts.callchain_sz > scripting_max_stack)
                scripting_max_stack = itrace_synth_opts.callchain_sz;
@@ -3615,6 +3668,12 @@ int cmd_script(int argc, const char **argv)
        if (symbol__init(&session->header.env) < 0)
                goto out_delete;
 
+       uname(&uts);
+       if (!strcmp(uts.machine, session->header.env.arch) ||
+           (!strcmp(uts.machine, "x86_64") &&
+            !strcmp(session->header.env.arch, "i386")))
+               native_arch = true;
+
        script.session = session;
        script__setup_sample_type(&script);
 
index 7b8f09b0b8bf7139463fb8d8736346c73f3cfe65..a3c060878faab185ee2174f2e8a862ac039b6fe6 100644 (file)
@@ -244,11 +244,25 @@ perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu, u32 thread,
                                           process_synthesized_event, NULL);
 }
 
+static int read_single_counter(struct perf_evsel *counter, int cpu,
+                              int thread, struct timespec *rs)
+{
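+       /*
+        * duration_time is a tool event serviced entirely inside perf: no
+        * kernel counter is read, the elapsed wall-clock time (in ns)
+        * becomes the count. Usage sketch:
+        *   perf stat -e duration_time,cycles sleep 1
+        */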
+       if (counter->tool_event == PERF_TOOL_DURATION_TIME) {
+               u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL;
+               struct perf_counts_values *count =
+                       perf_counts(counter->counts, cpu, thread);
+               count->ena = count->run = val;
+               count->val = val;
+               return 0;
+       }
+       return perf_evsel__read_counter(counter, cpu, thread);
+}
+
 /*
  * Read out the results of a single counter:
  * do not aggregate counts across CPUs in system-wide mode
  */
-static int read_counter(struct perf_evsel *counter)
+static int read_counter(struct perf_evsel *counter, struct timespec *rs)
 {
        int nthreads = thread_map__nr(evsel_list->threads);
        int ncpus, cpu, thread;
@@ -275,7 +289,7 @@ static int read_counter(struct perf_evsel *counter)
                         * (via perf_evsel__read_counter) and sets their count->loaded.
                         */
                        if (!count->loaded &&
-                           perf_evsel__read_counter(counter, cpu, thread)) {
+                           read_single_counter(counter, cpu, thread, rs)) {
                                counter->counts->scaled = -1;
                                perf_counts(counter->counts, cpu, thread)->ena = 0;
                                perf_counts(counter->counts, cpu, thread)->run = 0;
@@ -304,13 +318,13 @@ static int read_counter(struct perf_evsel *counter)
        return 0;
 }
 
-static void read_counters(void)
+static void read_counters(struct timespec *rs)
 {
        struct perf_evsel *counter;
        int ret;
 
        evlist__for_each_entry(evsel_list, counter) {
-               ret = read_counter(counter);
+               ret = read_counter(counter, rs);
                if (ret)
                        pr_debug("failed to read counter %s\n", counter->name);
 
@@ -323,11 +337,11 @@ static void process_interval(void)
 {
        struct timespec ts, rs;
 
-       read_counters();
-
        clock_gettime(CLOCK_MONOTONIC, &ts);
        diff_timespec(&rs, &ts, &ref_time);
 
+       read_counters(&rs);
+
        if (STAT_RECORD) {
                if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
                        pr_err("failed to write stat round event\n");
@@ -593,7 +607,7 @@ try_again:
         * avoid arbitrary skew, we must read all counters before closing any
         * group leaders.
         */
-       read_counters();
+       read_counters(&(struct timespec) { .tv_nsec = t1-t0 });
        perf_evlist__close(evsel_list);
 
        return WEXITSTATUS(status);
@@ -718,7 +732,8 @@ static struct option stat_options[] = {
                    "system-wide collection from all CPUs"),
        OPT_BOOLEAN('g', "group", &group,
                    "put the counters into a counter group"),
-       OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"),
+       OPT_BOOLEAN(0, "scale", &stat_config.scale,
+                   "Use --no-scale to disable counter scaling for multiplexing"),
        OPT_INCR('v', "verbose", &verbose,
                    "be more verbose (show counter open errors, etc)"),
        OPT_INTEGER('r', "repeat", &stat_config.run_count,
@@ -1307,6 +1322,7 @@ static void init_features(struct perf_session *session)
        for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
                perf_header__set_feat(&session->header, feat);
 
+       perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
        perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
        perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
        perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
index 231a90daa958131e132fb79ef7f7d4b0f3d10ff0..fbbb0da43abbad579f354ac909a6ffccec14b5ce 100644 (file)
@@ -1189,30 +1189,26 @@ static int __cmd_top(struct perf_top *top)
        pthread_t thread, thread_process;
        int ret;
 
-       top->session = perf_session__new(NULL, false, NULL);
-       if (top->session == NULL)
-               return -1;
-
        if (!top->annotation_opts.objdump_path) {
                ret = perf_env__lookup_objdump(&top->session->header.env,
                                               &top->annotation_opts.objdump_path);
                if (ret)
-                       goto out_delete;
+                       return ret;
        }
 
        ret = callchain_param__setup_sample_type(&callchain_param);
        if (ret)
-               goto out_delete;
+               return ret;
 
        if (perf_session__register_idle_thread(top->session) < 0)
-               goto out_delete;
+               return -1;
 
        if (top->nr_threads_synthesize > 1)
                perf_set_multithreaded();
 
        init_process_thread(top);
 
-       ret = perf_event__synthesize_bpf_events(&top->tool, perf_event__process,
+       ret = perf_event__synthesize_bpf_events(top->session, perf_event__process,
                                                &top->session->machines.host,
                                                &top->record_opts);
        if (ret < 0)
@@ -1227,13 +1223,18 @@ static int __cmd_top(struct perf_top *top)
 
        if (perf_hpp_list.socket) {
                ret = perf_env__read_cpu_topology_map(&perf_env);
-               if (ret < 0)
-                       goto out_err_cpu_topo;
+               if (ret < 0) {
+                       char errbuf[BUFSIZ];
+                       const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
+
+                       ui__error("Could not read the CPU topology map: %s\n", err);
+                       return ret;
+               }
        }
 
        ret = perf_top__start_counters(top);
        if (ret)
-               goto out_delete;
+               return ret;
 
        top->session->evlist = top->evlist;
        perf_session__set_id_hdr_size(top->session);
@@ -1252,7 +1253,7 @@ static int __cmd_top(struct perf_top *top)
        ret = -1;
        if (pthread_create(&thread_process, NULL, process_thread, top)) {
                ui__error("Could not create process thread.\n");
-               goto out_delete;
+               return ret;
        }
 
        if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
@@ -1296,19 +1297,7 @@ out_join:
 out_join_thread:
        pthread_cond_signal(&top->qe.cond);
        pthread_join(thread_process, NULL);
-out_delete:
-       perf_session__delete(top->session);
-       top->session = NULL;
-
        return ret;
-
-out_err_cpu_topo: {
-       char errbuf[BUFSIZ];
-       const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
-
-       ui__error("Could not read the CPU topology map: %s\n", err);
-       goto out_delete;
-}
 }
 
 static int
@@ -1388,6 +1377,7 @@ int cmd_top(int argc, const char **argv)
                         * */
                        .overwrite      = 0,
                        .sample_time    = true,
+                       .sample_time_set = true,
                },
                .max_stack           = sysctl__max_stack(),
                .annotation_opts     = annotation__default_options,
@@ -1480,6 +1470,7 @@ int cmd_top(int argc, const char **argv)
                    "Display raw encoding of assembly instructions (default)"),
        OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
                    "Enable kernel symbol demangling"),
+       OPT_BOOLEAN(0, "no-bpf-event", &top.record_opts.no_bpf_event, "do not record bpf events"),
        OPT_STRING(0, "objdump", &top.annotation_opts.objdump_path, "path",
                    "objdump binary to use for disassembly and annotations"),
        OPT_STRING('M', "disassembler-style", &top.annotation_opts.disassembler_style, "disassembler style",
@@ -1511,6 +1502,7 @@ int cmd_top(int argc, const char **argv)
                        "number of thread to run event synthesize"),
        OPT_END()
        };
+       struct perf_evlist *sb_evlist = NULL;
        const char * const top_usage[] = {
                "perf top [<options>]",
                NULL
@@ -1628,8 +1620,9 @@ int cmd_top(int argc, const char **argv)
        annotation_config__init();
 
        symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
-       if (symbol__init(NULL) < 0)
-               return -1;
+       status = symbol__init(NULL);
+       if (status < 0)
+               goto out_delete_evlist;
 
        sort__setup_elide(stdout);
 
@@ -1639,10 +1632,28 @@ int cmd_top(int argc, const char **argv)
                signal(SIGWINCH, winch_sig);
        }
 
+       top.session = perf_session__new(NULL, false, NULL);
+       if (top.session == NULL) {
+               status = -1;
+               goto out_delete_evlist;
+       }
+
+       if (!top.record_opts.no_bpf_event)
+               bpf_event__add_sb_event(&sb_evlist, &perf_env);
+
+       if (perf_evlist__start_sb_thread(sb_evlist, target)) {
+               pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
+               opts->no_bpf_event = true;
+       }
+
        status = __cmd_top(&top);
 
+       if (!opts->no_bpf_event)
+               perf_evlist__stop_sb_thread(sb_evlist);
+
 out_delete_evlist:
        perf_evlist__delete(top.evlist);
+       perf_session__delete(top.session);
 
        return status;
 }
index 50df168be326d84cba4e5cfbc26ea8a119d392cf..f470144d1a7043ecf9fbcc3467d15c6df148b3d1 100644 (file)
@@ -78,6 +78,8 @@ static void library_status(void)
        STATUS(HAVE_LZMA_SUPPORT, lzma);
        STATUS(HAVE_AUXTRACE_SUPPORT, get_cpuid);
        STATUS(HAVE_LIBBPF_SUPPORT, bpf);
+       STATUS(HAVE_AIO_SUPPORT, aio);
+       STATUS(HAVE_ZSTD_SUPPORT, zstd);
 }
 
 int cmd_version(int argc, const char **argv)
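
A quick way to check the two new rows (usage sketch):

	perf version --build-options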
index 05745f3ce912dadf75495297d2b9978327d97774..999fe9170122e3962b5154842344d9e0639febee 100644 (file)
@@ -40,5 +40,6 @@ int cmd_mem(int argc, const char **argv);
 int cmd_data(int argc, const char **argv);
 int cmd_ftrace(int argc, const char **argv);
 
-int find_scripts(char **scripts_array, char **scripts_path_array);
+int find_scripts(char **scripts_array, char **scripts_path_array, int num,
+                int pathlen);
 #endif
index 7b55613924ded7a0f965c80799b1d9230bc320d9..c68ee06cae637fe5aad2c51e2dba1e16af0b5064 100755 (executable)
@@ -103,7 +103,7 @@ done
 # diff with extra ignore lines
 check arch/x86/lib/memcpy_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
 check arch/x86/lib/memset_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
-check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common.h>"'
+check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common\(-tools\)*.h>"'
 check include/uapi/linux/mman.h       '-I "^#include <\(uapi/\)*asm/mman.h>"'
 
 # diff non-symmetric files
index f9b2161e1ca493c908d1f4265fe77e7847dee969..2422894a81946aa28b97be3dcd42bbd17139ae2b 100644 (file)
@@ -15,6 +15,7 @@
  */
 
 #include <unistd.h>
+#include <linux/limits.h>
 #include <pid_filter.h>
 
 /* bpf-output associated map */
@@ -41,32 +42,110 @@ struct syscall_exit_args {
 struct augmented_filename {
        unsigned int    size;
        int             reserved;
-       char            value[256];
+       char            value[PATH_MAX];
 };
 
-#define SYS_OPEN 2
-#define SYS_ACCESS 21
-#define SYS_OPENAT 257
+/* syscalls where the first arg is a string */
+#define SYS_OPEN                 2
+#define SYS_STAT                 4
+#define SYS_LSTAT                6
+#define SYS_ACCESS              21
+#define SYS_EXECVE              59
+#define SYS_TRUNCATE            76
+#define SYS_CHDIR               80
+#define SYS_RENAME              82
+#define SYS_MKDIR               83
+#define SYS_RMDIR               84
+#define SYS_CREAT               85
+#define SYS_LINK                86
+#define SYS_UNLINK              87
+#define SYS_SYMLINK             88
+#define SYS_READLINK            89
+#define SYS_CHMOD               90
+#define SYS_CHOWN               92
+#define SYS_LCHOWN              94
+#define SYS_MKNOD              133
+#define SYS_STATFS             137
+#define SYS_PIVOT_ROOT         155
+#define SYS_CHROOT             161
+#define SYS_ACCT               163
+#define SYS_SWAPON             167
+#define SYS_SWAPOFF            168
+#define SYS_DELETE_MODULE      176
+#define SYS_SETXATTR           188
+#define SYS_LSETXATTR          189
+#define SYS_GETXATTR           191
+#define SYS_LGETXATTR          192
+#define SYS_LISTXATTR          194
+#define SYS_LLISTXATTR         195
+#define SYS_REMOVEXATTR        197
+#define SYS_LREMOVEXATTR       198
+#define SYS_MQ_OPEN            240
+#define SYS_MQ_UNLINK          241
+#define SYS_ADD_KEY            248
+#define SYS_REQUEST_KEY        249
+#define SYS_SYMLINKAT          266
+#define SYS_MEMFD_CREATE       319
+
+/* syscalls where the second arg is a string */
+
+#define SYS_PWRITE64            18
+#define SYS_EXECVE              59
+#define SYS_RENAME              82
+#define SYS_QUOTACTL           179
+#define SYS_FSETXATTR          190
+#define SYS_FGETXATTR          193
+#define SYS_FREMOVEXATTR       199
+#define SYS_MQ_TIMEDSEND       242
+#define SYS_REQUEST_KEY        249
+#define SYS_INOTIFY_ADD_WATCH  254
+#define SYS_OPENAT             257
+#define SYS_MKDIRAT            258
+#define SYS_MKNODAT            259
+#define SYS_FCHOWNAT           260
+#define SYS_FUTIMESAT          261
+#define SYS_NEWFSTATAT         262
+#define SYS_UNLINKAT           263
+#define SYS_RENAMEAT           264
+#define SYS_LINKAT             265
+#define SYS_READLINKAT         267
+#define SYS_FCHMODAT           268
+#define SYS_FACCESSAT          269
+#define SYS_UTIMENSAT          280
+#define SYS_NAME_TO_HANDLE_AT  303
+#define SYS_FINIT_MODULE       313
+#define SYS_RENAMEAT2          316
+#define SYS_EXECVEAT           322
+#define SYS_STATX              332
 
 pid_filter(pids_filtered);
 
+struct augmented_args_filename {
+       struct syscall_enter_args args;
+       struct augmented_filename filename;
+};
+
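+/*
+ * A PATH_MAX-sized payload does not fit in the 512 byte BPF stack limit,
+ * so use a single-entry per-CPU array as scratch space instead.
+ */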
+bpf_map(augmented_filename_map, PERCPU_ARRAY, int, struct augmented_args_filename, 1);
+
 SEC("raw_syscalls:sys_enter")
 int sys_enter(struct syscall_enter_args *args)
 {
-       struct {
-               struct syscall_enter_args args;
-               struct augmented_filename filename;
-       } augmented_args;
-       struct syscall *syscall;
-       unsigned int len = sizeof(augmented_args);
+       struct augmented_args_filename *augmented_args;
+       unsigned int len = sizeof(*augmented_args);
        const void *filename_arg = NULL;
+       struct syscall *syscall;
+       int key = 0;
+
+       augmented_args = bpf_map_lookup_elem(&augmented_filename_map, &key);
+       if (augmented_args == NULL)
+               return 1;
 
        if (pid_filter__has(&pids_filtered, getpid()))
                return 0;
 
-       probe_read(&augmented_args.args, sizeof(augmented_args.args), args);
+       probe_read(&augmented_args->args, sizeof(augmented_args->args), args);
 
-       syscall = bpf_map_lookup_elem(&syscalls, &augmented_args.args.syscall_nr);
+       syscall = bpf_map_lookup_elem(&syscalls, &augmented_args->args.syscall_nr);
        if (syscall == NULL || !syscall->enabled)
                return 0;
        /*
@@ -109,30 +188,105 @@ int sys_enter(struct syscall_enter_args *args)
         *
         *       after the ctx memory access to prevent their down stream merging.
         */
-       switch (augmented_args.args.syscall_nr) {
+       /*
+        * This table of which args are strings will be provided by userspace,
+        * in the syscalls map: we will already have to do that lookup to see
+        * if this specific syscall is filtered, so we might as well get more
+        * info about which syscall args are strings or pointers, how many
+        * bytes to copy per arg, etc.
+        *
+        * For now, hard-code it until we have all the basic mechanisms in
+        * place to automate everything and make the kernel part completely
+        * driven by information obtained in userspace for each kernel version
+        * and processor architecture, making the kernel part the same no
+        * matter what kernel version or processor architecture it runs on.
+        */
+       switch (augmented_args->args.syscall_nr) {
+       case SYS_ACCT:
+       case SYS_ADD_KEY:
+       case SYS_CHDIR:
+       case SYS_CHMOD:
+       case SYS_CHOWN:
+       case SYS_CHROOT:
+       case SYS_CREAT:
+       case SYS_DELETE_MODULE:
+       case SYS_EXECVE:
+       case SYS_GETXATTR:
+       case SYS_LCHOWN:
+       case SYS_LGETXATTR:
+       case SYS_LINK:
+       case SYS_LISTXATTR:
+       case SYS_LLISTXATTR:
+       case SYS_LREMOVEXATTR:
+       case SYS_LSETXATTR:
+       case SYS_LSTAT:
+       case SYS_MEMFD_CREATE:
+       case SYS_MKDIR:
+       case SYS_MKNOD:
+       case SYS_MQ_OPEN:
+       case SYS_MQ_UNLINK:
+       case SYS_PIVOT_ROOT:
+       case SYS_READLINK:
+       case SYS_REMOVEXATTR:
+       case SYS_RENAME:
+       case SYS_REQUEST_KEY:
+       case SYS_RMDIR:
+       case SYS_SETXATTR:
+       case SYS_STAT:
+       case SYS_STATFS:
+       case SYS_SWAPOFF:
+       case SYS_SWAPON:
+       case SYS_SYMLINK:
+       case SYS_SYMLINKAT:
+       case SYS_TRUNCATE:
+       case SYS_UNLINK:
        case SYS_ACCESS:
        case SYS_OPEN:   filename_arg = (const void *)args->args[0];
                        __asm__ __volatile__("": : :"memory");
                         break;
+       case SYS_EXECVEAT:
+       case SYS_FACCESSAT:
+       case SYS_FCHMODAT:
+       case SYS_FCHOWNAT:
+       case SYS_FGETXATTR:
+       case SYS_FINIT_MODULE:
+       case SYS_FREMOVEXATTR:
+       case SYS_FSETXATTR:
+       case SYS_FUTIMESAT:
+       case SYS_INOTIFY_ADD_WATCH:
+       case SYS_LINKAT:
+       case SYS_MKDIRAT:
+       case SYS_MKNODAT:
+       case SYS_MQ_TIMEDSEND:
+       case SYS_NAME_TO_HANDLE_AT:
+       case SYS_NEWFSTATAT:
+       case SYS_PWRITE64:
+       case SYS_QUOTACTL:
+       case SYS_READLINKAT:
+       case SYS_RENAMEAT:
+       case SYS_RENAMEAT2:
+       case SYS_STATX:
+       case SYS_UNLINKAT:
+       case SYS_UTIMENSAT:
        case SYS_OPENAT: filename_arg = (const void *)args->args[1];
                         break;
        }
 
        if (filename_arg != NULL) {
-               augmented_args.filename.reserved = 0;
-               augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
-                                                             sizeof(augmented_args.filename.value),
+               augmented_args->filename.reserved = 0;
+               augmented_args->filename.size = probe_read_str(&augmented_args->filename.value,
+                                                             sizeof(augmented_args->filename.value),
                                                              filename_arg);
-               if (augmented_args.filename.size < sizeof(augmented_args.filename.value)) {
-                       len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size;
-                       len &= sizeof(augmented_args.filename.value) - 1;
+               if (augmented_args->filename.size < sizeof(augmented_args->filename.value)) {
+                       len -= sizeof(augmented_args->filename.value) - augmented_args->filename.size;
+                       len &= sizeof(augmented_args->filename.value) - 1;
                }
        } else {
-               len = sizeof(augmented_args.args);
+               len = sizeof(augmented_args->args);
        }
 
        /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
-       return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
+       return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, augmented_args, len);
 }
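
The move from an on-stack struct to a single-slot PERCPU_ARRAY above is the
usual way around the 512-byte BPF stack limit: the map element serves as
per-CPU scratch memory, and the power-of-two mask applied to len gives the
verifier a provable upper bound on the size handed to perf_event_output().
A minimal sketch of the idiom, reusing this file's bpf_map()/probe_read_str()
wrappers; scratch_t, scratch_map and copy_string_bounded are illustrative
names, not part of the patch:

struct scratch_t {
	char buf[256];	/* must stay a power of two for the mask below */
};

bpf_map(scratch_map, PERCPU_ARRAY, int, struct scratch_t, 1);

static int copy_string_bounded(void *ctx, const void *user_str)
{
	struct scratch_t *s;
	int key = 0, len;

	s = bpf_map_lookup_elem(&scratch_map, &key);
	if (s == NULL)
		return 1;	/* per-CPU lookups can fail; the verifier insists we check */

	len = probe_read_str(s->buf, sizeof(s->buf), user_str);
	/* bound len so the verifier can prove the copy never exceeds the buffer */
	len &= sizeof(s->buf) - 1;
	return perf_event_output(ctx, &__augmented_syscalls__, BPF_F_CURRENT_CPU,
				 s->buf, len);
}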
 
 SEC("raw_syscalls:sys_exit")
index a11cb006f9682ed15300ee6fa1abce0c0125f78e..72df4b6fa36fd12ad45cf00cdb0750d27c5d5061 100644 (file)
@@ -298,6 +298,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
                use_pager = 1;
        commit_pager_choice();
 
+       perf_env__init(&perf_env);
        perf_env__set_cmdline(&perf_env, argc, argv);
        status = p->fn(argc, argv);
        perf_config__exit();
index b120e547ddc7b7fd9b4a03c8723a797f010b9511..369eae61068de43e3b1bf95bc6b0d995cfbc1726 100644 (file)
@@ -66,7 +66,7 @@ struct record_opts {
        bool         ignore_missing_thread;
        bool         strict_freq;
        bool         sample_id;
-       bool         bpf_event;
+       bool         no_bpf_event;
        unsigned int freq;
        unsigned int mmap_pages;
        unsigned int auxtrace_mmap_pages;
@@ -85,6 +85,7 @@ struct record_opts {
        u64          clockid_res_ns;
        int          nr_cblocks;
        int          affinity;
+       int          mmap_flush;
 };
 
 enum perf_affinity {
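
Two record_opts changes land here: renaming bpf_event to no_bpf_event inverts
the flag's polarity, so recording BPF load/unload side-band events presumably
becomes the default and call sites now test !opts->no_bpf_event; the new
mmap_flush field looks like a tunable minimum number of bytes to accumulate in
the mmap'ed ring buffer before flushing it to the output file.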
index 704302c3e67dd16752cdcb10aac076d7dd4fa260..9dc2f6b70354a2327db3b4a94127b83b63b55f49 100644 (file)
     "BriefDescription": "CO mach 0 Busy. Used by PMU to sample ave RC livetime(mach0 used as sample point)",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x517082",
-    "EventName": "PM_CO_DISP_FAIL",
-    "BriefDescription": "CO dispatch failed due to all CO machines being busy",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x527084",
-    "EventName": "PM_CO_TM_SC_FOOTPRINT",
-    "BriefDescription": "L2 did a cleanifdirty CO to the L3 (ie created an SC line in the L3)",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x3608a",
     "EventName": "PM_CO_USAGE",
     "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L3 on the same chip due to a instruction side request",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x617082",
-    "EventName": "PM_ISIDE_DISP",
-    "BriefDescription": "All i-side dispatch attempts",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x627084",
-    "EventName": "PM_ISIDE_DISP_FAIL",
-    "BriefDescription": "All i-side dispatch attempts that failed due to a addr collision with another machine",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x627086",
-    "EventName": "PM_ISIDE_DISP_FAIL_OTHER",
-    "BriefDescription": "All i-side dispatch attempts that failed due to a reason other than addrs collision",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x4608e",
     "EventName": "PM_ISIDE_L2MEMACC",
     "BriefDescription": "valid when first beat of data comes in for an i-side fetch where data came from mem(or L4)",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x44608e",
-    "EventName": "PM_ISIDE_MRU_TOUCH",
-    "BriefDescription": "Iside L2 MRU touch",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x30ac",
     "EventName": "PM_ISU_REF_FX0",
     "BriefDescription": "Instruction Demand sectors wriittent into IL1",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x417080",
-    "EventName": "PM_L2_CASTOUT_MOD",
-    "BriefDescription": "L2 Castouts - Modified (M, Mu, Me)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x417082",
-    "EventName": "PM_L2_CASTOUT_SHR",
-    "BriefDescription": "L2 Castouts - Shared (T, Te, Si, S)",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x27084",
     "EventName": "PM_L2_CHIP_PUMP",
     "BriefDescription": "RC requests that were local on chip pump attempts",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x427086",
-    "EventName": "PM_L2_DC_INV",
-    "BriefDescription": "Dcache invalidates from L2",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x44608c",
-    "EventName": "PM_L2_DISP_ALL_L2MISS",
-    "BriefDescription": "All successful Ld/St dispatches for this thread that were an L2miss",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x27086",
     "EventName": "PM_L2_GROUP_PUMP",
     "BriefDescription": "RC requests that were on Node Pump attempts",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x626084",
-    "EventName": "PM_L2_GRP_GUESS_CORRECT",
-    "BriefDescription": "L2 guess grp and guess was correct (data intra-6chip AND ^on-chip)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x626086",
-    "EventName": "PM_L2_GRP_GUESS_WRONG",
-    "BriefDescription": "L2 guess grp and guess was not correct (ie data on-chip OR beyond-6chip)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x427084",
-    "EventName": "PM_L2_IC_INV",
-    "BriefDescription": "Icache Invalidates from L2",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x436088",
-    "EventName": "PM_L2_INST",
-    "BriefDescription": "All successful I-side dispatches for this thread (excludes i_l2mru_tch reqs)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x43608a",
-    "EventName": "PM_L2_INST_MISS",
-    "BriefDescription": "All successful i-side dispatches that were an L2miss for this thread (excludes i_l2mru_tch reqs)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x416080",
-    "EventName": "PM_L2_LD",
-    "BriefDescription": "All successful D-side Load dispatches for this thread",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x437088",
-    "EventName": "PM_L2_LD_DISP",
-    "BriefDescription": "All successful load dispatches",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x43708a",
-    "EventName": "PM_L2_LD_HIT",
-    "BriefDescription": "All successful load dispatches that were L2 hits",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x426084",
-    "EventName": "PM_L2_LD_MISS",
-    "BriefDescription": "All successful D-Side Load dispatches that were an L2miss for this thread",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x616080",
-    "EventName": "PM_L2_LOC_GUESS_CORRECT",
-    "BriefDescription": "L2 guess loc and guess was correct (ie data local)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x616082",
-    "EventName": "PM_L2_LOC_GUESS_WRONG",
-    "BriefDescription": "L2 guess loc and guess was not correct (ie data not on chip)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x516080",
-    "EventName": "PM_L2_RCLD_DISP",
-    "BriefDescription": "L2 RC load dispatch attempt",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x516082",
-    "EventName": "PM_L2_RCLD_DISP_FAIL_ADDR",
-    "BriefDescription": "L2 RC load dispatch attempt failed due to address collision with RC/CO/SN/SQ",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x526084",
-    "EventName": "PM_L2_RCLD_DISP_FAIL_OTHER",
-    "BriefDescription": "L2 RC load dispatch attempt failed due to other reasons",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x536088",
-    "EventName": "PM_L2_RCST_DISP",
-    "BriefDescription": "L2 RC store dispatch attempt",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x53608a",
-    "EventName": "PM_L2_RCST_DISP_FAIL_ADDR",
-    "BriefDescription": "L2 RC store dispatch attempt failed due to address collision with RC/CO/SN/SQ",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x54608c",
-    "EventName": "PM_L2_RCST_DISP_FAIL_OTHER",
-    "BriefDescription": "L2 RC store dispatch attempt failed due to other reasons",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x537088",
-    "EventName": "PM_L2_RC_ST_DONE",
-    "BriefDescription": "RC did st to line that was Tx or Sx",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x63708a",
-    "EventName": "PM_L2_RTY_LD",
-    "BriefDescription": "RC retries on PB for any load from core",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x3708a",
     "EventName": "PM_L2_RTY_ST",
     "BriefDescription": "RC retries on PB for any store from core",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x54708c",
-    "EventName": "PM_L2_SN_M_RD_DONE",
-    "BriefDescription": "SNP dispatched for a read and was M",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x54708e",
-    "EventName": "PM_L2_SN_M_WR_DONE",
-    "BriefDescription": "SNP dispatched for a write and was M",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x53708a",
-    "EventName": "PM_L2_SN_SX_I_DONE",
-    "BriefDescription": "SNP dispatched and went from Sx or Tx to Ix",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x17080",
     "EventName": "PM_L2_ST",
     "BriefDescription": "All successful D-side store dispatches for this thread",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x44708c",
-    "EventName": "PM_L2_ST_DISP",
-    "BriefDescription": "All successful store dispatches",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x44708e",
-    "EventName": "PM_L2_ST_HIT",
-    "BriefDescription": "All successful store dispatches that were L2Hits",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x17082",
     "EventName": "PM_L2_ST_MISS",
     "BriefDescription": "All successful D-side store dispatches for this thread that were L2 Miss",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x636088",
-    "EventName": "PM_L2_SYS_GUESS_CORRECT",
-    "BriefDescription": "L2 guess sys and guess was correct (ie data beyond-6chip)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x63608a",
-    "EventName": "PM_L2_SYS_GUESS_WRONG",
-    "BriefDescription": "L2 guess sys and guess was not correct (ie data ^beyond-6chip)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x617080",
-    "EventName": "PM_L2_SYS_PUMP",
-    "BriefDescription": "RC requests that were system pump attempts",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x1e05e",
     "EventName": "PM_L2_TM_REQ_ABORT",
     "BriefDescription": "TM marked store abort",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x23808a",
-    "EventName": "PM_L3_CINJ",
-    "BriefDescription": "l3 ci of cache inject",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x128084",
-    "EventName": "PM_L3_CI_HIT",
-    "BriefDescription": "L3 Castins Hit (total count",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x128086",
-    "EventName": "PM_L3_CI_MISS",
-    "BriefDescription": "L3 castins miss (total count",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x819082",
     "EventName": "PM_L3_CI_USAGE",
     "BriefDescription": "rotating sample of 16 CI or CO actives",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x438088",
-    "EventName": "PM_L3_CO",
-    "BriefDescription": "l3 castout occurring ( does not include casthrough or log writes (cinj/dmaw)",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x83908b",
     "EventName": "PM_L3_CO0_ALLOC",
     "BriefDescription": "L3 CO to L3.1 OR of port 0 and 1 ( lossy)",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x238088",
-    "EventName": "PM_L3_CO_LCO",
-    "BriefDescription": "Total L3 castouts occurred on LCO",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x28084",
     "EventName": "PM_L3_CO_MEM",
     "BriefDescription": "L3 CO to memory OR of port 0 and 1 ( lossy)",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0xb19082",
-    "EventName": "PM_L3_GRP_GUESS_CORRECT",
-    "BriefDescription": "Initial scope=group and data from same group (near) (pred successful)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0xb3908a",
-    "EventName": "PM_L3_GRP_GUESS_WRONG_HIGH",
-    "BriefDescription": "Initial scope=group but data from local node. Predition too high",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0xb39088",
-    "EventName": "PM_L3_GRP_GUESS_WRONG_LOW",
-    "BriefDescription": "Initial scope=group but data from outside group (far or rem). Prediction too Low",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x218080",
-    "EventName": "PM_L3_HIT",
-    "BriefDescription": "L3 Hits",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x138088",
-    "EventName": "PM_L3_L2_CO_HIT",
-    "BriefDescription": "L2 castout hits",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x13808a",
-    "EventName": "PM_L3_L2_CO_MISS",
-    "BriefDescription": "L2 castout miss",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x14808c",
-    "EventName": "PM_L3_LAT_CI_HIT",
-    "BriefDescription": "L3 Lateral Castins Hit",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x14808e",
-    "EventName": "PM_L3_LAT_CI_MISS",
-    "BriefDescription": "L3 Lateral Castins Miss",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x228084",
-    "EventName": "PM_L3_LD_HIT",
-    "BriefDescription": "L3 demand LD Hits",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x228086",
-    "EventName": "PM_L3_LD_MISS",
-    "BriefDescription": "L3 demand LD Miss",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x1e052",
     "EventName": "PM_L3_LD_PREF",
     "BriefDescription": "L3 Load Prefetches",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0xb19080",
-    "EventName": "PM_L3_LOC_GUESS_CORRECT",
-    "BriefDescription": "initial scope=node/chip and data from local node (local) (pred successful)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0xb29086",
-    "EventName": "PM_L3_LOC_GUESS_WRONG",
-    "BriefDescription": "Initial scope=node but data from out side local node (near or far or rem). Prediction too Low",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x218082",
-    "EventName": "PM_L3_MISS",
-    "BriefDescription": "L3 Misses",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x54808c",
-    "EventName": "PM_L3_P0_CO_L31",
-    "BriefDescription": "l3 CO to L3.1 (lco) port 0",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x538088",
-    "EventName": "PM_L3_P0_CO_MEM",
-    "BriefDescription": "l3 CO to memory port 0",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x929084",
-    "EventName": "PM_L3_P0_CO_RTY",
-    "BriefDescription": "L3 CO received retry port 0",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0xa29084",
     "EventName": "PM_L3_P0_GRP_PUMP",
     "BriefDescription": "L3 LCO received retry port 0",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0xa19080",
-    "EventName": "PM_L3_P0_NODE_PUMP",
-    "BriefDescription": "L3 pf sent with nodal scope port 0",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x919080",
-    "EventName": "PM_L3_P0_PF_RTY",
-    "BriefDescription": "L3 PF received retry port 0",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x939088",
-    "EventName": "PM_L3_P0_SN_HIT",
-    "BriefDescription": "L3 snoop hit port 0",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x118080",
-    "EventName": "PM_L3_P0_SN_INV",
-    "BriefDescription": "Port0 snooper detects someone doing a store to a line thats Sx",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x94908c",
-    "EventName": "PM_L3_P0_SN_MISS",
-    "BriefDescription": "L3 snoop miss port 0",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0xa39088",
-    "EventName": "PM_L3_P0_SYS_PUMP",
-    "BriefDescription": "L3 pf sent with sys scope port 0",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x54808e",
-    "EventName": "PM_L3_P1_CO_L31",
-    "BriefDescription": "l3 CO to L3.1 (lco) port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x53808a",
-    "EventName": "PM_L3_P1_CO_MEM",
-    "BriefDescription": "l3 CO to memory port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x929086",
-    "EventName": "PM_L3_P1_CO_RTY",
-    "BriefDescription": "L3 CO received retry port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0xa29086",
-    "EventName": "PM_L3_P1_GRP_PUMP",
-    "BriefDescription": "L3 pf sent with grp scope port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x528086",
-    "EventName": "PM_L3_P1_LCO_DATA",
-    "BriefDescription": "lco sent with data port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x518082",
-    "EventName": "PM_L3_P1_LCO_NO_DATA",
-    "BriefDescription": "dataless l3 lco sent port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0xa4908e",
-    "EventName": "PM_L3_P1_LCO_RTY",
-    "BriefDescription": "L3 LCO received retry port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0xa19082",
-    "EventName": "PM_L3_P1_NODE_PUMP",
-    "BriefDescription": "L3 pf sent with nodal scope port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x919082",
-    "EventName": "PM_L3_P1_PF_RTY",
-    "BriefDescription": "L3 PF received retry port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x93908a",
-    "EventName": "PM_L3_P1_SN_HIT",
-    "BriefDescription": "L3 snoop hit port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x118082",
-    "EventName": "PM_L3_P1_SN_INV",
-    "BriefDescription": "Port1 snooper detects someone doing a store to a line thats Sx",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x94908e",
-    "EventName": "PM_L3_P1_SN_MISS",
-    "BriefDescription": "L3 snoop miss port 1",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0xa3908a",
-    "EventName": "PM_L3_P1_SYS_PUMP",
-    "BriefDescription": "L3 pf sent with sys scope port 1",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x84908d",
     "EventName": "PM_L3_PF0_ALLOC",
     "BriefDescription": "lifetime, sample of PF machine 0 valid",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x428084",
-    "EventName": "PM_L3_PF_HIT_L3",
-    "BriefDescription": "l3 pf hit in l3",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x18080",
     "EventName": "PM_L3_PF_MISS_L3",
     "BriefDescription": "Data stream touchto L3",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0xb29084",
-    "EventName": "PM_L3_SYS_GUESS_CORRECT",
-    "BriefDescription": "Initial scope=system and data from outside group (far or rem)(pred successful)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0xb4908c",
-    "EventName": "PM_L3_SYS_GUESS_WRONG",
-    "BriefDescription": "Initial scope=system but data from local or near. Predction too high",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x24808e",
-    "EventName": "PM_L3_TRANS_PF",
-    "BriefDescription": "L3 Transient prefetch",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x18081",
     "EventName": "PM_L3_WI0_ALLOC",
     "BriefDescription": "lifetime, sample of Write Inject machine 0 valid",
     "PublicDescription": "0.0"
   },
-  {,
-    "EventCode": "0x418080",
-    "EventName": "PM_L3_WI0_BUSY",
-    "BriefDescription": "lifetime, sample of Write Inject machine 0 valid",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x418082",
-    "EventName": "PM_L3_WI_USAGE",
-    "BriefDescription": "rotating sample of 8 WI actives",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0xc080",
     "EventName": "PM_LD_REF_L1_LSU0",
     "BriefDescription": "Dispatch time non favored tbegin",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x328084",
-    "EventName": "PM_NON_TM_RST_SC",
-    "BriefDescription": "non tm snp rst tm sc",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x2001a",
     "EventName": "PM_NTCG_ALL_FIN",
     "BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 RC machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x34808e",
-    "EventName": "PM_RD_CLEARING_SC",
-    "BriefDescription": "rd clearing sc",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x34808c",
-    "EventName": "PM_RD_FORMING_SC",
-    "BriefDescription": "rd forming sc",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x428086",
-    "EventName": "PM_RD_HIT_PF",
-    "BriefDescription": "rd machine hit l3 pf machine",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x20004",
     "EventName": "PM_REAL_SRQ_FULL",
     "BriefDescription": "TLBIE snoop",
     "PublicDescription": "TLBIE snoopSnoop TLBIE"
   },
-  {,
-    "EventCode": "0x338088",
-    "EventName": "PM_SNP_TM_HIT_M",
-    "BriefDescription": "snp tm st hit m mu",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x33808a",
-    "EventName": "PM_SNP_TM_HIT_T",
-    "BriefDescription": "snp tm_st_hit t tn te",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x4608c",
     "EventName": "PM_SN_USAGE",
     "BriefDescription": "STCX executed reported at sent to nest",
     "PublicDescription": "STCX executed reported at sent to nest42"
   },
-  {,
-    "EventCode": "0x717080",
-    "EventName": "PM_ST_CAUSED_FAIL",
-    "BriefDescription": "Non TM St caused any thread to fail",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x3090",
     "EventName": "PM_SWAP_CANCEL",
     "BriefDescription": "Tm any tbegin",
     "PublicDescription": ""
   },
-  {,
-    "EventCode": "0x318082",
-    "EventName": "PM_TM_CAM_OVERFLOW",
-    "BriefDescription": "l3 tm cam overflow during L2 co of SC",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x74708c",
-    "EventName": "PM_TM_CAP_OVERFLOW",
-    "BriefDescription": "TM Footprint Capactiy Overflow",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x20ba",
     "EventName": "PM_TM_END_ALL",
     "BriefDescription": "Transactional conflict from LSU, whatever gets reported to texas",
     "PublicDescription": "Transactional conflict from LSU, whatever gets reported to texas 42"
   },
-  {,
-    "EventCode": "0x727086",
-    "EventName": "PM_TM_FAV_CAUSED_FAIL",
-    "BriefDescription": "TM Load (fav) caused another thread to fail",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x717082",
-    "EventName": "PM_TM_LD_CAUSED_FAIL",
-    "BriefDescription": "Non TM Ld caused any thread to fail",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x727084",
-    "EventName": "PM_TM_LD_CONF",
-    "BriefDescription": "TM Load (fav or non-fav) ran into conflict (failed)",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x328086",
-    "EventName": "PM_TM_RST_SC",
-    "BriefDescription": "tm snp rst tm sc",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x318080",
-    "EventName": "PM_TM_SC_CO",
-    "BriefDescription": "l3 castout tm Sc line",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x73708a",
-    "EventName": "PM_TM_ST_CAUSED_FAIL",
-    "BriefDescription": "TM Store (fav or non-fav) caused another thread to fail",
-    "PublicDescription": ""
-  },
-  {,
-    "EventCode": "0x737088",
-    "EventName": "PM_TM_ST_CONF",
-    "BriefDescription": "TM Store (fav or non-fav) ran into conflict (failed)",
-    "PublicDescription": ""
-  },
   {,
     "EventCode": "0x20bc",
     "EventName": "PM_TM_TBEGIN",
index e7a3524b748f01152e8a503392048966601a5540..68618152ea2c62578368767fa1ddfd550b076015 100644 (file)
@@ -4,7 +4,7 @@
                "EventCode": "128",
                "EventName": "L1D_RO_EXCL_WRITES",
                "BriefDescription": "L1D Read-only Exclusive Writes",
-               "PublicDescription": "Counter:128       Name:L1D_RO_EXCL_WRITES A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+               "PublicDescription": "L1D_RO_EXCL_WRITES A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
        },
        {
                "Unit": "CPU-M-CF",
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/branch.json b/tools/perf/pmu-events/arch/x86/amdfam17h/branch.json
new file mode 100644 (file)
index 0000000..93ddfd8
--- /dev/null
@@ -0,0 +1,12 @@
+[
+  {
+    "EventName": "bp_l1_btb_correct",
+    "EventCode": "0x8a",
+    "BriefDescription": "L1 BTB Correction."
+  },
+  {
+    "EventName": "bp_l2_btb_correct",
+    "EventCode": "0x8b",
+    "BriefDescription": "L2 BTB Correction."
+  }
+]
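
Once perf is built with these JSON files, the events resolve by name, e.g.
perf stat -e bp_l1_btb_correct. Without the names, the same counter can be
programmed directly; a hedged sketch via perf_event_open(2), assuming the
usual x86 raw encoding where the event select occupies the low byte (these
two events carry no unit mask), with error handling trimmed:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x8a;	/* bp_l1_btb_correct from the table above */
	attr.disabled = 1;

	/* count for the calling thread on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement goes here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));
	printf("bp_l1_btb_correct: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}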
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json b/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json
new file mode 100644 (file)
index 0000000..fad4af9
--- /dev/null
@@ -0,0 +1,287 @@
+[
+  {
+    "EventName": "ic_fw32",
+    "EventCode": "0x80",
+    "BriefDescription": "The number of 32B fetch windows transferred from IC pipe to DE instruction decoder (includes non-cacheable and cacheable fill responses)."
+  },
+  {
+    "EventName": "ic_fw32_miss",
+    "EventCode": "0x81",
+    "BriefDescription": "The number of 32B fetch windows tried to read the L1 IC and missed in the full tag."
+  },
+  {
+    "EventName": "ic_cache_fill_l2",
+    "EventCode": "0x82",
+    "BriefDescription": "The number of 64 byte instruction cache line was fulfilled from the L2 cache."
+  },
+  {
+    "EventName": "ic_cache_fill_sys",
+    "EventCode": "0x83",
+    "BriefDescription": "The number of 64 byte instruction cache line fulfilled from system memory or another cache."
+  },
+  {
+    "EventName": "bp_l1_tlb_miss_l2_hit",
+    "EventCode": "0x84",
+    "BriefDescription": "The number of instruction fetches that miss in the L1 ITLB but hit in the L2 ITLB."
+  },
+  {
+    "EventName": "bp_l1_tlb_miss_l2_miss",
+    "EventCode": "0x85",
+    "BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs."
+  },
+  {
+    "EventName": "bp_snp_re_sync",
+    "EventCode": "0x86",
+    "BriefDescription": "The number of pipeline restarts caused by invalidating probes that hit on the instruction stream currently being executed. This would happen if the active instruction stream was being modified by another processor in an MP system - typically a highly unlikely event."
+  },
+  {
+    "EventName": "ic_fetch_stall.ic_stall_any",
+    "EventCode": "0x87",
+    "BriefDescription": "IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
+    "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "ic_fetch_stall.ic_stall_dq_empty",
+    "EventCode": "0x87",
+    "BriefDescription": "IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
+    "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "ic_fetch_stall.ic_stall_back_pressure",
+    "EventCode": "0x87",
+    "BriefDescription": "IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
+    "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "ic_cache_inval.l2_invalidating_probe",
+    "EventCode": "0x8c",
+    "BriefDescription": "IC line invalidated due to L2 invalidating probe (external or LS).",
+    "PublicDescription": "The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core. IC line invalidated due to L2 invalidating probe (external or LS).",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "ic_cache_inval.fill_invalidated",
+    "EventCode": "0x8c",
+    "BriefDescription": "IC line invalidated due to overwriting fill response.",
+    "PublicDescription": "The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core. IC line invalidated due to overwriting fill response.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "bp_tlb_rel",
+    "EventCode": "0x99",
+    "BriefDescription": "The number of ITLB reload requests."
+  },
+  {
+    "EventName": "l2_request_g1.rd_blk_l",
+    "EventCode": "0x60",
+    "BriefDescription": "Requests to L2 Group1.",
+    "PublicDescription": "Requests to L2 Group1.",
+    "UMask": "0x80"
+  },
+  {
+    "EventName": "l2_request_g1.rd_blk_x",
+    "EventCode": "0x60",
+    "BriefDescription": "Requests to L2 Group1.",
+    "PublicDescription": "Requests to L2 Group1.",
+    "UMask": "0x40"
+  },
+  {
+    "EventName": "l2_request_g1.ls_rd_blk_c_s",
+    "EventCode": "0x60",
+    "BriefDescription": "Requests to L2 Group1.",
+    "PublicDescription": "Requests to L2 Group1.",
+    "UMask": "0x20"
+  },
+  {
+    "EventName": "l2_request_g1.cacheable_ic_read",
+    "EventCode": "0x60",
+    "BriefDescription": "Requests to L2 Group1.",
+    "PublicDescription": "Requests to L2 Group1.",
+    "UMask": "0x10"
+  },
+  {
+    "EventName": "l2_request_g1.change_to_x",
+    "EventCode": "0x60",
+    "BriefDescription": "Requests to L2 Group1.",
+    "PublicDescription": "Requests to L2 Group1.",
+    "UMask": "0x8"
+  },
+  {
+    "EventName": "l2_request_g1.prefetch_l2",
+    "EventCode": "0x60",
+    "BriefDescription": "Requests to L2 Group1.",
+    "PublicDescription": "Requests to L2 Group1.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "l2_request_g1.l2_hw_pf",
+    "EventCode": "0x60",
+    "BriefDescription": "Requests to L2 Group1.",
+    "PublicDescription": "Requests to L2 Group1.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "l2_request_g1.other_requests",
+    "EventCode": "0x60",
+    "BriefDescription": "Events covered by l2_request_g2.",
+    "PublicDescription": "Requests to L2 Group1. Events covered by l2_request_g2.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "l2_request_g2.group1",
+    "EventCode": "0x61",
+    "BriefDescription": "All Group 1 commands not in unit0.",
+    "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. All Group 1 commands not in unit0.",
+    "UMask": "0x80"
+  },
+  {
+    "EventName": "l2_request_g2.ls_rd_sized",
+    "EventCode": "0x61",
+    "BriefDescription": "RdSized, RdSized32, RdSized64.",
+    "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. RdSized, RdSized32, RdSized64.",
+    "UMask": "0x40"
+  },
+  {
+    "EventName": "l2_request_g2.ls_rd_sized_nc",
+    "EventCode": "0x61",
+    "BriefDescription": "RdSizedNC, RdSized32NC, RdSized64NC.",
+    "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. RdSizedNC, RdSized32NC, RdSized64NC.",
+    "UMask": "0x20"
+  },
+  {
+    "EventName": "l2_request_g2.ic_rd_sized",
+    "EventCode": "0x61",
+    "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+    "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+    "UMask": "0x10"
+  },
+  {
+    "EventName": "l2_request_g2.ic_rd_sized_nc",
+    "EventCode": "0x61",
+    "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+    "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+    "UMask": "0x8"
+  },
+  {
+    "EventName": "l2_request_g2.smc_inval",
+    "EventCode": "0x61",
+    "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+    "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "l2_request_g2.bus_locks_originator",
+    "EventCode": "0x61",
+    "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+    "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "l2_request_g2.bus_locks_responses",
+    "EventCode": "0x61",
+    "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+    "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "l2_latency.l2_cycles_waiting_on_fills",
+    "EventCode": "0x62",
+    "BriefDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
+    "PublicDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "l2_wcb_req.wcb_write",
+    "EventCode": "0x63",
+    "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) write requests.",
+    "BriefDescription": "LS to L2 WCB write requests.",
+    "UMask": "0x40"
+  },
+  {
+    "EventName": "l2_wcb_req.wcb_close",
+    "EventCode": "0x63",
+    "BriefDescription": "LS to L2 WCB close requests.",
+    "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) close requests.",
+    "UMask": "0x20"
+  },
+  {
+    "EventName": "l2_wcb_req.zero_byte_store",
+    "EventCode": "0x63",
+    "BriefDescription": "LS to L2 WCB zero byte store requests.",
+    "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) zero byte store requests.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "l2_wcb_req.cl_zero",
+    "EventCode": "0x63",
+    "PublicDescription": "LS to L2 WCB cache line zeroing requests.",
+    "BriefDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) cache line zeroing requests.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "l2_cache_req_stat.ls_rd_blk_cs",
+    "EventCode": "0x64",
+    "BriefDescription": "LS ReadBlock C/S Hit.",
+    "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS ReadBlock C/S Hit.",
+    "UMask": "0x80"
+  },
+  {
+    "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_x",
+    "EventCode": "0x64",
+    "BriefDescription": "LS Read Block L Hit X.",
+    "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS Read Block L Hit X.",
+    "UMask": "0x40"
+  },
+  {
+    "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_s",
+    "EventCode": "0x64",
+    "BriefDescription": "LsRdBlkL Hit Shared.",
+    "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LsRdBlkL Hit Shared.",
+    "UMask": "0x20"
+  },
+  {
+    "EventName": "l2_cache_req_stat.ls_rd_blk_x",
+    "EventCode": "0x64",
+    "BriefDescription": "LsRdBlkX/ChgToX Hit X.  Count RdBlkX finding Shared as a Miss.",
+    "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LsRdBlkX/ChgToX Hit X.  Count RdBlkX finding Shared as a Miss.",
+    "UMask": "0x10"
+  },
+  {
+    "EventName": "l2_cache_req_stat.ls_rd_blk_c",
+    "EventCode": "0x64",
+    "BriefDescription": "LS Read Block C S L X Change to X Miss.",
+    "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS Read Block C S L X Change to X Miss.",
+    "UMask": "0x8"
+  },
+  {
+    "EventName": "l2_cache_req_stat.ic_fill_hit_x",
+    "EventCode": "0x64",
+    "BriefDescription": "IC Fill Hit Exclusive Stale.",
+    "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Hit Exclusive Stale.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "l2_cache_req_stat.ic_fill_hit_s",
+    "EventCode": "0x64",
+    "BriefDescription": "IC Fill Hit Shared.",
+    "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Hit Shared.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "l2_cache_req_stat.ic_fill_miss",
+    "EventCode": "0x64",
+    "BriefDescription": "IC Fill Miss.",
+    "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Miss.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "l2_fill_pending.l2_fill_busy",
+    "EventCode": "0x6d",
+    "BriefDescription": "Total cycles spent with one or more fill requests in flight from L2.",
+    "PublicDescription": "Total cycles spent with one or more fill requests in flight from L2.",
+    "UMask": "0x1"
+  }
+]
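
For the entries above that do carry a UMask, the name expands to a raw config
in the conventional x86 layout, presumably event select in bits 0-7 and unit
mask in bits 8-15; ic_fetch_stall.ic_stall_any (EventCode 0x87, UMask 0x4)
would then also be reachable as the raw event r487 even without the JSON name.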
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/core.json b/tools/perf/pmu-events/arch/x86/amdfam17h/core.json
new file mode 100644 (file)
index 0000000..7b285b0
--- /dev/null
@@ -0,0 +1,134 @@
+[
+  {
+    "EventName": "ex_ret_instr",
+    "EventCode": "0xc0",
+    "BriefDescription": "Retired Instructions."
+  },
+  {
+    "EventName": "ex_ret_cops",
+    "EventCode": "0xc1",
+    "BriefDescription": "Retired Uops.",
+    "PublicDescription": "The number of uOps retired. This includes all processor activity (instructions, exceptions, interrupts, microcode assists, etc.). The number of events logged per cycle can vary from 0 to 4."
+  },
+  {
+    "EventName": "ex_ret_brn",
+    "EventCode": "0xc2",
+    "BriefDescription": "[Retired Branch Instructions.",
+    "PublicDescription": "The number of branch instructions retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
+  },
+  {
+    "EventName": "ex_ret_brn_misp",
+    "EventCode": "0xc3",
+    "BriefDescription": "Retired Branch Instructions Mispredicted.",
+    "PublicDescription": "The number of branch instructions retired, of any type, that were not correctly predicted. This includes those for which prediction is not attempted (far control transfers, exceptions and interrupts)."
+  },
+  {
+    "EventName": "ex_ret_brn_tkn",
+    "EventCode": "0xc4",
+    "BriefDescription": "Retired Taken Branch Instructions.",
+    "PublicDescription": "The number of taken branches that were retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
+  },
+  {
+    "EventName": "ex_ret_brn_tkn_misp",
+    "EventCode": "0xc5",
+    "BriefDescription": "Retired Taken Branch Instructions Mispredicted.",
+    "PublicDescription": "The number of retired taken branch instructions that were mispredicted."
+  },
+  {
+    "EventName": "ex_ret_brn_far",
+    "EventCode": "0xc6",
+    "BriefDescription": "Retired Far Control Transfers.",
+    "PublicDescription": "The number of far control transfers retired including far call/jump/return, IRET, SYSCALL and SYSRET, plus exceptions and interrupts. Far control transfers are not subject to branch prediction."
+  },
+  {
+    "EventName": "ex_ret_brn_resync",
+    "EventCode": "0xc7",
+    "BriefDescription": "Retired Branch Resyncs.",
+    "PublicDescription": "The number of resync branches. These reflect pipeline restarts due to certain microcode assists and events such as writes to the active instruction stream, among other things. Each occurrence reflects a restart penalty similar to a branch mispredict. This is relatively rare."
+  },
+  {
+    "EventName": "ex_ret_near_ret",
+    "EventCode": "0xc8",
+    "BriefDescription": "Retired Near Returns.",
+    "PublicDescription": "The number of near return instructions (RET or RET Iw) retired."
+  },
+  {
+    "EventName": "ex_ret_near_ret_mispred",
+    "EventCode": "0xc9",
+    "BriefDescription": "Retired Near Returns Mispredicted.",
+    "PublicDescription": "The number of near returns retired that were not correctly predicted by the return address predictor. Each such mispredict incurs the same penalty as a mispredicted conditional branch instruction."
+  },
+  {
+    "EventName": "ex_ret_brn_ind_misp",
+    "EventCode": "0xca",
+    "BriefDescription": "Retired Indirect Branch Instructions Mispredicted.",
+    "PublicDescription": "Retired Indirect Branch Instructions Mispredicted."
+  },
+  {
+    "EventName": "ex_ret_mmx_fp_instr.sse_instr",
+    "EventCode": "0xcb",
+    "BriefDescription": "SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
+    "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "ex_ret_mmx_fp_instr.mmx_instr",
+    "EventCode": "0xcb",
+    "BriefDescription": "MMX instructions.",
+    "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. MMX instructions.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "ex_ret_mmx_fp_instr.x87_instr",
+    "EventCode": "0xcb",
+    "BriefDescription": "x87 instructions.",
+    "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. x87 instructions.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "ex_ret_cond",
+    "EventCode": "0xd1",
+    "BriefDescription": "Retired Conditional Branch Instructions."
+  },
+  {
+    "EventName": "ex_ret_cond_misp",
+    "EventCode": "0xd2",
+    "BriefDescription": "Retired Conditional Branch Instructions Mispredicted."
+  },
+  {
+    "EventName": "ex_div_busy",
+    "EventCode": "0xd3",
+    "BriefDescription": "Div Cycles Busy count."
+  },
+  {
+    "EventName": "ex_div_count",
+    "EventCode": "0xd4",
+    "BriefDescription": "Div Op Count."
+  },
+  {
+    "EventName": "ex_tagged_ibs_ops.ibs_count_rollover",
+    "EventCode": "0x1cf",
+    "BriefDescription": "Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
+    "PublicDescription": "Tagged IBS Ops. Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops_ret",
+    "EventCode": "0x1cf",
+    "BriefDescription": "Number of Ops tagged by IBS that retired.",
+    "PublicDescription": "Tagged IBS Ops. Number of Ops tagged by IBS that retired.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops",
+    "EventCode": "0x1cf",
+    "BriefDescription": "Number of Ops tagged by IBS.",
+    "PublicDescription": "Tagged IBS Ops. Number of Ops tagged by IBS.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "ex_ret_fus_brnch_inst",
+    "EventCode": "0x1d0",
+    "BriefDescription": "The number of fused retired branch instructions retired per cycle. The number of events logged per cycle can vary from 0 to 3."
+  }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json b/tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json
new file mode 100644 (file)
index 0000000..ea47119
--- /dev/null
@@ -0,0 +1,168 @@
+[
+  {
+    "EventName": "fpu_pipe_assignment.dual",
+    "EventCode": "0x00",
+    "BriefDescription": "Total number multi-pipe uOps.",
+    "PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number multi-pipe uOps assigned to Pipe 3.",
+    "UMask": "0xf0"
+  },
+  {
+    "EventName": "fpu_pipe_assignment.total",
+    "EventCode": "0x00",
+    "BriefDescription": "Total number uOps.",
+    "PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to Pipe 3.",
+    "UMask": "0xf"
+  },
+  {
+    "EventName": "fp_sched_empty",
+    "EventCode": "0x01",
+    "BriefDescription": "This is a speculative event. The number of cycles in which the FPU scheduler is empty. Note that some Ops like FP loads bypass the scheduler."
+  },
+  {
+    "EventName": "fp_retx87_fp_ops.all",
+    "EventCode": "0x02",
+    "BriefDescription": "All Ops.",
+    "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8.",
+    "UMask": "0x7"
+  },
+  {
+    "EventName": "fp_retx87_fp_ops.div_sqr_r_ops",
+    "EventCode": "0x02",
+    "BriefDescription": "Divide and square root Ops.",
+    "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Divide and square root Ops.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "fp_retx87_fp_ops.mul_ops",
+    "EventCode": "0x02",
+    "BriefDescription": "Multiply Ops.",
+    "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Multiply Ops.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "fp_retx87_fp_ops.add_sub_ops",
+    "EventCode": "0x02",
+    "BriefDescription": "Add/subtract Ops.",
+    "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Add/subtract Ops.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "fp_ret_sse_avx_ops.all",
+    "EventCode": "0x03",
+    "BriefDescription": "All FLOPS.",
+    "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
+    "UMask": "0xff"
+  },
+  {
+    "EventName": "fp_ret_sse_avx_ops.dp_mult_add_flops",
+    "EventCode": "0x03",
+    "BriefDescription": "Double precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
+    "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
+    "UMask": "0x80"
+  },
+  {
+    "EventName": "fp_ret_sse_avx_ops.dp_div_flops",
+    "EventCode": "0x03",
+    "BriefDescription": "Double precision divide/square root FLOPS.",
+    "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision divide/square root FLOPS.",
+    "UMask": "0x40"
+  },
+  {
+    "EventName": "fp_ret_sse_avx_ops.dp_mult_flops",
+    "EventCode": "0x03",
+    "BriefDescription": "Double precision multiply FLOPS.",
+    "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision multiply FLOPS.",
+    "UMask": "0x20"
+  },
+  {
+    "EventName": "fp_ret_sse_avx_ops.dp_add_sub_flops",
+    "EventCode": "0x03",
+    "BriefDescription": "Double precision add/subtract FLOPS.",
+    "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision add/subtract FLOPS.",
+    "UMask": "0x10"
+  },
+  {
+    "EventName": "fp_ret_sse_avx_ops.sp_mult_add_flops",
+    "EventCode": "0x03",
+    "BriefDescription": "Single precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
+    "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
+    "UMask": "0x8"
+  },
+  {
+    "EventName": "fp_ret_sse_avx_ops.sp_div_flops",
+    "EventCode": "0x03",
+    "BriefDescription": "Single-precision divide/square root FLOPS.",
+    "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision divide/square root FLOPS.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "fp_ret_sse_avx_ops.sp_mult_flops",
+    "EventCode": "0x03",
+    "BriefDescription": "Single-precision multiply FLOPS.",
+    "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision multiply FLOPS.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "fp_ret_sse_avx_ops.sp_add_sub_flops",
+    "EventCode": "0x03",
+    "BriefDescription": "Single-precision add/subtract FLOPS.",
+    "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision add/subtract FLOPS.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "fp_num_mov_elim_scal_op.optimized",
+    "EventCode": "0x04",
+    "BriefDescription": "Number of Scalar Ops optimized.",
+    "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of Scalar Ops optimized.",
+    "UMask": "0x8"
+  },
+  {
+    "EventName": "fp_num_mov_elim_scal_op.opt_potential",
+    "EventCode": "0x04",
+    "BriefDescription": "Number of Ops that are candidates for optimization (have Z-bit either set or pass).",
+    "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of Ops that are candidates for optimization (have Z-bit either set or pass).",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "fp_num_mov_elim_scal_op.sse_mov_ops_elim",
+    "EventCode": "0x04",
+    "BriefDescription": "Number of SSE Move Ops eliminated.",
+    "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of SSE Move Ops eliminated.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "fp_num_mov_elim_scal_op.sse_mov_ops",
+    "EventCode": "0x04",
+    "BriefDescription": "Number of SSE Move Ops.",
+    "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of SSE Move Ops.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "fp_retired_ser_ops.x87_ctrl_ret",
+    "EventCode": "0x05",
+    "BriefDescription": "x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits.",
+    "PublicDescription": "The number of serializing Ops retired. x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits.",
+    "UMask": "0x8"
+  },
+  {
+    "EventName": "fp_retired_ser_ops.x87_bot_ret",
+    "EventCode": "0x05",
+    "BriefDescription": "x87 bottom-executing uOps retired.",
+    "PublicDescription": "The number of serializing Ops retired. x87 bottom-executing uOps retired.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "fp_retired_ser_ops.sse_ctrl_ret",
+    "EventCode": "0x05",
+    "BriefDescription": "SSE control word mispredict traps due to mispredictions in RC, FTZ or DAZ, or changes in mask bits.",
+    "PublicDescription": "The number of serializing Ops retired. SSE control word mispredict traps due to mispredictions in RC, FTZ or DAZ, or changes in mask bits.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "fp_retired_ser_ops.sse_bot_ret",
+    "EventCode": "0x05",
+    "BriefDescription": "SSE bottom-executing uOps retired.",
+    "PublicDescription": "The number of serializing Ops retired. SSE bottom-executing uOps retired.",
+    "UMask": "0x1"
+  }
+]
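
The fp_ret_sse_avx_ops.* subevents above all count retired SSE/AVX FLOPS directly, with the multiply-add umask pre-weighted so that one MAC operation logs two FLOPS. A minimal sketch of combining the single-precision umask counts into a GFLOPS figure; the counts and the measurement interval are hypothetical inputs, not values from this file:

    # Combine the single-precision fp_ret_sse_avx_ops subevents
    # (umasks 0x1 add/sub, 0x2 mult, 0x4 div/sqrt, 0x8 multiply-add).
    def sp_gflops(add_sub, mult, div, mac, seconds):
        # Each count is already in FLOPS; the MAC subevent counts 2 per op.
        return (add_sub + mult + div + mac) / seconds / 1e9

    # Hypothetical counts over a 1.5 s window:
    print(sp_gflops(add_sub=4.0e9, mult=3.2e9, div=0.1e9, mac=8.8e9, seconds=1.5))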
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/memory.json b/tools/perf/pmu-events/arch/x86/amdfam17h/memory.json
new file mode 100644 (file)
index 0000000..fa2d60d
--- /dev/null
@@ -0,0 +1,162 @@
+[
+  {
+    "EventName": "ls_locks.bus_lock",
+    "EventCode": "0x25",
+    "BriefDescription": "Bus lock when a locked operations crosses a cache boundary or is done on an uncacheable memory type.",
+    "PublicDescription": "Bus lock when a locked operations crosses a cache boundary or is done on an uncacheable memory type.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "ls_dispatch.ld_st_dispatch",
+    "EventCode": "0x29",
+    "BriefDescription": "Load-op-Stores.",
+    "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed. Load-op-Stores.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "ls_dispatch.store_dispatch",
+    "EventCode": "0x29",
+    "BriefDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+    "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "ls_dispatch.ld_dispatch",
+    "EventCode": "0x29",
+    "BriefDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+    "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "ls_stlf",
+    "EventCode": "0x35",
+    "BriefDescription": "Number of STLF hits."
+  },
+  {
+    "EventName": "ls_dc_accesses",
+    "EventCode": "0x40",
+    "BriefDescription": "The number of accesses to the data cache for load and store references. This may include certain microcode scratchpad accesses, although these are generally rare. Each increment represents an eight-byte access, although the instruction may only be accessing a portion of that. This event is a speculative event."
+  },
+  {
+    "EventName": "ls_l1_d_tlb_miss.all",
+    "EventCode": "0x45",
+    "BriefDescription": "L1 DTLB Miss or Reload off all sizes.",
+    "PublicDescription": "L1 DTLB Miss or Reload off all sizes.",
+    "UMask": "0xff"
+  },
+  {
+    "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_miss",
+    "EventCode": "0x45",
+    "BriefDescription": "L1 DTLB Miss of a page of 1G size.",
+    "PublicDescription": "L1 DTLB Miss of a page of 1G size.",
+    "UMask": "0x80"
+  },
+  {
+    "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_miss",
+    "EventCode": "0x45",
+    "BriefDescription": "L1 DTLB Miss of a page of 2M size.",
+    "PublicDescription": "L1 DTLB Miss of a page of 2M size.",
+    "UMask": "0x40"
+  },
+  {
+    "EventName": "ls_l1_d_tlb_miss.tlb_reload_32k_l2_miss",
+    "EventCode": "0x45",
+    "BriefDescription": "L1 DTLB Miss of a page of 32K size.",
+    "PublicDescription": "L1 DTLB Miss of a page of 32K size.",
+    "UMask": "0x20"
+  },
+  {
+    "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_miss",
+    "EventCode": "0x45",
+    "BriefDescription": "L1 DTLB Miss of a page of 4K size.",
+    "PublicDescription": "L1 DTLB Miss of a page of 4K size.",
+    "UMask": "0x10"
+  },
+  {
+    "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_hit",
+    "EventCode": "0x45",
+    "BriefDescription": "L1 DTLB Reload of a page of 1G size.",
+    "PublicDescription": "L1 DTLB Reload of a page of 1G size.",
+    "UMask": "0x8"
+  },
+  {
+    "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_hit",
+    "EventCode": "0x45",
+    "BriefDescription": "L1 DTLB Reload of a page of 2M size.",
+    "PublicDescription": "L1 DTLB Reload of a page of 2M size.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "ls_l1_d_tlb_miss.tlb_reload_32k_l2_hit",
+    "EventCode": "0x45",
+    "BriefDescription": "L1 DTLB Reload of a page of 32K size.",
+    "PublicDescription": "L1 DTLB Reload of a page of 32K size.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_hit",
+    "EventCode": "0x45",
+    "BriefDescription": "L1 DTLB Reload of a page of 4K size.",
+    "PublicDescription": "L1 DTLB Reload of a page of 4K size.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "ls_tablewalker.perf_mon_tablewalk_alloc_iside",
+    "EventCode": "0x46",
+    "BriefDescription": "Tablewalker allocation.",
+    "PublicDescription": "Tablewalker allocation.",
+    "UMask": "0xc"
+  },
+  {
+    "EventName": "ls_tablewalker.perf_mon_tablewalk_alloc_dside",
+    "EventCode": "0x46",
+    "BriefDescription": "Tablewalker allocation.",
+    "PublicDescription": "Tablewalker allocation.",
+    "UMask": "0x3"
+  },
+  {
+    "EventName": "ls_misal_accesses",
+    "EventCode": "0x47",
+    "BriefDescription": "Misaligned loads."
+  },
+  {
+    "EventName": "ls_pref_instr_disp.prefetch_nta",
+    "EventCode": "0x4b",
+    "BriefDescription": "Software Prefetch Instructions (PREFETCHNTA instruction) Dispatched.",
+    "PublicDescription": "Software Prefetch Instructions (PREFETCHNTA instruction) Dispatched.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "ls_pref_instr_disp.store_prefetch_w",
+    "EventCode": "0x4b",
+    "BriefDescription": "Software Prefetch Instructions (3DNow PREFETCHW instruction) Dispatched.",
+    "PublicDescription": "Software Prefetch Instructions (3DNow PREFETCHW instruction) Dispatched.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "ls_pref_instr_disp.load_prefetch_w",
+    "EventCode": "0x4b",
+    "BriefDescription": "Prefetch, Prefetch_T0_T1_T2.",
+    "PublicDescription": "Software Prefetch Instructions Dispatched. Prefetch, Prefetch_T0_T1_T2.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "ls_inef_sw_pref.mab_mch_cnt",
+    "EventCode": "0x52",
+    "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+    "PublicDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "ls_inef_sw_pref.data_pipe_sw_pf_dc_hit",
+    "EventCode": "0x52",
+    "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+    "PublicDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "ls_not_halted_cyc",
+    "EventCode": "0x76",
+    "BriefDescription": "Cycles not in Halt."
+  }
+]
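
The ls_l1_d_tlb_miss.* umasks above split L1 DTLB misses by page size and by whether the L2 TLB supplied the reload (the *_l2_hit umasks) or itself missed (the *_l2_miss umasks); the .all umask (0xff) ORs every bit together. A small cross-check sketch, assuming the umask conditions are disjoint so the per-size counts sum to the aggregate; all numbers are hypothetical:

    # Hypothetical per-umask counts for ls_l1_d_tlb_miss (event 0x45).
    l2_hit  = {"4k": 120_000, "32k": 4_000, "2m": 900, "1g": 10}  # umasks 0x1..0x8
    l2_miss = {"4k":  15_000, "32k":   600, "2m": 150, "1g":  2}  # umasks 0x10..0x80

    per_size = {s: l2_hit[s] + l2_miss[s] for s in l2_hit}
    total = sum(per_size.values())  # should match the .all (0xff) count
    for size, n in per_size.items():
        print(f"{size:>3}: {n:>8} misses ({100 * n / total:.1f}%)")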
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/other.json b/tools/perf/pmu-events/arch/x86/amdfam17h/other.json
new file mode 100644 (file)
index 0000000..b26a00d
--- /dev/null
@@ -0,0 +1,65 @@
+[
+  {
+    "EventName": "ic_oc_mode_switch.oc_ic_mode_switch",
+    "EventCode": "0x28a",
+    "BriefDescription": "OC to IC mode switch.",
+    "PublicDescription": "OC Mode Switch. OC to IC mode switch.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "ic_oc_mode_switch.ic_oc_mode_switch",
+    "EventCode": "0x28a",
+    "BriefDescription": "IC to OC mode switch.",
+    "PublicDescription": "OC Mode Switch. IC to OC mode switch.",
+    "UMask": "0x1"
+  },
+  {
+    "EventName": "de_dis_dispatch_token_stalls0.retire_token_stall",
+    "EventCode": "0xaf",
+    "BriefDescription": "RETIRE Tokens unavailable.",
+    "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. RETIRE Tokens unavailable.",
+    "UMask": "0x40"
+  },
+  {
+    "EventName": "de_dis_dispatch_token_stalls0.agsq_token_stall",
+    "EventCode": "0xaf",
+    "BriefDescription": "AGSQ Tokens unavailable.",
+    "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. AGSQ Tokens unavailable.",
+    "UMask": "0x20"
+  },
+  {
+    "EventName": "de_dis_dispatch_token_stalls0.alu_token_stall",
+    "EventCode": "0xaf",
+    "BriefDescription": "ALU tokens total unavailable.",
+    "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALU tokens total unavailable.",
+    "UMask": "0x10"
+  },
+  {
+    "EventName": "de_dis_dispatch_token_stalls0.alsq3_0_token_stall",
+    "EventCode": "0xaf",
+    "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall.",
+    "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall.",
+    "UMask": "0x8"
+  },
+  {
+    "EventName": "de_dis_dispatch_token_stalls0.alsq3_token_stall",
+    "EventCode": "0xaf",
+    "BriefDescription": "ALSQ 3 Tokens unavailable.",
+    "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 3 Tokens unavailable.",
+    "UMask": "0x4"
+  },
+  {
+    "EventName": "de_dis_dispatch_token_stalls0.alsq2_token_stall",
+    "EventCode": "0xaf",
+    "BriefDescription": "ALSQ 2 Tokens unavailable.",
+    "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 2 Tokens unavailable.",
+    "UMask": "0x2"
+  },
+  {
+    "EventName": "de_dis_dispatch_token_stalls0.alsq1_token_stall",
+    "EventCode": "0xaf",
+    "BriefDescription": "ALSQ 1 Tokens unavailable.",
+    "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 1 Tokens unavailable.",
+    "UMask": "0x1"
+  }
+]
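
Each de_dis_dispatch_token_stalls0.* umask above counts cycles in which a valid dispatch group could not dispatch because that particular token pool was empty. A sketch of turning the counts into per-resource stall fractions, assuming a cycle base such as ls_not_halted_cyc (from memory.json above) measured over the same interval; note that a single cycle can stall on more than one token type, so the fractions need not sum to the total stall rate. All counts are hypothetical:

    # Hypothetical counts for de_dis_dispatch_token_stalls0 (event 0xaf).
    stalls = {"retire": 2.1e8, "agsq": 5.0e7, "alu": 9.3e7,
              "alsq3_0": 1.2e7, "alsq3": 3.0e6, "alsq2": 2.5e6, "alsq1": 2.2e6}
    cycles = 3.0e9  # e.g. ls_not_halted_cyc over the same window

    for token, n in sorted(stalls.items(), key=lambda kv: -kv[1]):
        print(f"{token:>8}: {100 * n / cycles:5.2f}% of cycles stalled")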
index 935b7dcf067d21e595eaaa30e9dbe1a72d95fa80..ef69540ab61dbbce28e0623ca9b6c068c909d43c 100644 (file)
@@ -77,7 +77,7 @@
         "UMask": "0x1",
         "EventName": "UOPS.MS_CYCLES",
         "SampleAfterValue": "2000000",
-        "BriefDescription": "This event counts the cycles where 1 or more uops are issued by the micro-sequencer (MS), including microcode assists and inserted flows, and written to the IQ. ",
+        "BriefDescription": "This event counts the cycles where 1 or more uops are issued by the micro-sequencer (MS), including microcode assists and inserted flows, and written to the IQ.",
         "CounterMask": "1"
     }
 ]
\ No newline at end of file
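
The UOPS.MS_CYCLES entry above turns a per-cycle uop count into a cycle count via "CounterMask": "1": with cmask=N, the counter adds one on each cycle where the raw event value is at least N, rather than accumulating the raw value. A toy model of that semantics over a made-up per-cycle trace:

    def cmask_count(per_cycle_counts, cmask):
        # With a non-zero CounterMask the PMC increments once per
        # qualifying cycle instead of adding the raw event count.
        return sum(1 for c in per_cycle_counts if c >= cmask)

    ms_uops = [0, 0, 3, 1, 0, 2, 0, 0, 4]   # hypothetical MS uops per cycle
    print(cmask_count(ms_uops, cmask=1))    # 4 cycles had >= 1 MS uop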
index b2e681c78466bec87ae437d18ee7b7a7765595e9..09c6de13de20dd3ca2fac3872b306e6abedf427a 100644 (file)
         "UMask": "0x8",
         "EventName": "BR_MISSP_TYPE_RETIRED.IND_CALL",
         "SampleAfterValue": "200000",
-        "BriefDescription": "Mispredicted indirect calls, including both register and memory indirect. "
+        "BriefDescription": "Mispredicted indirect calls, including both register and memory indirect."
     },
     {
         "EventCode": "0x89",
index 00bfdb5c5acb381feb13b28a17ed8e9b3de10ab1..212b117a8ffb1ecae91866b1babd098d0136f925 100644 (file)
 [
     {
-        "BriefDescription": "Instructions Per Cycle (per logical thread)",
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Frontend_Bound"
+    },
+    {
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Frontend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Bad_Speculation"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Bad_Speculation_SMT"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Backend_Bound"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Backend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Retiring"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Retiring_SMT"
+    },
+    {
         "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Instructions Per Cycle (per logical thread)",
         "MetricGroup": "TopDownL1",
         "MetricName": "IPC"
     },
     {
-        "BriefDescription": "Uops Per Instruction",
         "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
-        "MetricGroup": "Pipeline",
+        "BriefDescription": "Uops Per Instruction",
+        "MetricGroup": "Pipeline;Retiring",
         "MetricName": "UPI"
     },
     {
-        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+        "BriefDescription": "Instruction per taken branch",
+        "MetricGroup": "Branches;PGO",
+        "MetricName": "IpTB"
+    },
+    {
+        "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+        "BriefDescription": "Branch instructions per taken branch. ",
+        "MetricGroup": "Branches;PGO",
+        "MetricName": "BpTB"
+    },
+    {
         "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
-        "MetricGroup": "Frontend",
+        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+        "MetricGroup": "PGO",
         "MetricName": "IFetch_Line_Utilization"
     },
     {
-        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
-        "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
-        "MetricGroup": "DSB; Frontend_Bandwidth",
+        "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+        "MetricGroup": "DSB;Frontend_Bandwidth",
         "MetricName": "DSB_Coverage"
     },
     {
-        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricGroup": "Pipeline;Summary",
         "MetricName": "CPI"
     },
     {
-        "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
         "MetricGroup": "Summary",
         "MetricName": "CLKS"
     },
     {
-        "BriefDescription": "Total issue-pipeline slots",
-        "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+        "MetricExpr": "4 * cycles",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
         "MetricGroup": "TopDownL1",
         "MetricName": "SLOTS"
     },
     {
-        "BriefDescription": "Total number of retired Instructions",
+        "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
+        "MetricGroup": "TopDownL1_SMT",
+        "MetricName": "SLOTS_SMT"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+        "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+        "MetricGroup": "Instruction_Type;L1_Bound",
+        "MetricName": "IpL"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+        "BriefDescription": "Instructions per Store",
+        "MetricGroup": "Instruction_Type;Store_Bound",
+        "MetricName": "IpS"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Instructions per Branch",
+        "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+        "MetricName": "IpB"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+        "BriefDescription": "Instruction per (near) call",
+        "MetricGroup": "Branches",
+        "MetricName": "IpCall"
+    },
+    {
         "MetricExpr": "INST_RETIRED.ANY",
+        "BriefDescription": "Total number of retired Instructions",
         "MetricGroup": "Summary",
         "MetricName": "Instructions"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / cycles",
         "BriefDescription": "Instructions Per Cycle (per physical core)",
-        "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
         "MetricGroup": "SMT",
         "MetricName": "CoreIPC"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Instructions Per Cycle (per physical core)",
+        "MetricGroup": "SMT",
+        "MetricName": "CoreIPC_SMT"
+    },
+    {
+        "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
+        "BriefDescription": "Floating Point Operations Per Cycle",
+        "MetricGroup": "FLOPS",
+        "MetricName": "FLOPc"
+    },
+    {
+        "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Floating Point Operations Per Cycle",
+        "MetricGroup": "FLOPS_SMT",
+        "MetricName": "FLOPc_SMT"
+    },
+    {
+        "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
         "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
-        "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
         "MetricGroup": "Pipeline;Ports_Utilization",
         "MetricName": "ILP"
     },
     {
-        "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
-        "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL  - (( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED )) ) / RS_EVENTS.EMPTY_END)",
-        "MetricGroup": "Unknown_Branches",
-        "MetricName": "BAClear_Cost"
+        "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+        "MetricGroup": "Branch_Mispredicts",
+        "MetricName": "Branch_Misprediction_Cost"
     },
     {
+        "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+        "MetricGroup": "Branch_Mispredicts_SMT",
+        "MetricName": "Branch_Misprediction_Cost_SMT"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+        "MetricGroup": "Branch_Mispredicts",
+        "MetricName": "IpMispredict"
+    },
+    {
+        "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
         "BriefDescription": "Core actual clocks when any thread is active on the physical core",
-        "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
         "MetricGroup": "SMT",
         "MetricName": "CORE_CLKS"
     },
     {
-        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
         "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
         "MetricGroup": "Memory_Bound;Memory_Lat",
         "MetricName": "Load_Miss_Real_Latency"
     },
     {
-        "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
-        "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+        "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+        "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
         "MetricGroup": "Memory_Bound;Memory_BW",
         "MetricName": "MLP"
     },
     {
+        "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / cycles",
         "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
-        "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED)) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
         "MetricGroup": "TLB",
         "MetricName": "Page_Walks_Utilization"
     },
     {
-        "BriefDescription": "Average CPU Utilization",
+        "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+        "MetricGroup": "TLB_SMT",
+        "MetricName": "Page_Walks_Utilization_SMT"
+    },
+    {
+        "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+        "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L1D_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+        "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L2_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+        "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L3_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L1MPKI"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2MPKI"
+    },
+    {
+        "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2MPKI_All"
+    },
+    {
+        "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2HPKI_All"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L3MPKI"
+    },
+    {
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+        "BriefDescription": "Average CPU Utilization",
         "MetricGroup": "Summary",
         "MetricName": "CPU_Utilization"
     },
     {
+        "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
         "BriefDescription": "Giga Floating Point Operations Per Second",
-        "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
         "MetricGroup": "FLOPS;Summary",
         "MetricName": "GFLOPs"
     },
     {
-        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricGroup": "Power",
         "MetricName": "Turbo_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricGroup": "SMT;Summary",
         "MetricName": "SMT_2T_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
     {
-        "BriefDescription": "C3 residency percent per core",
+        "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
+        "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "DRAM_BW_Use"
+    },
+    {
         "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per core",
         "MetricName": "C3_Core_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per core",
         "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per core",
         "MetricName": "C6_Core_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per core",
         "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per core",
         "MetricName": "C7_Core_Residency"
     },
     {
-        "BriefDescription": "C2 residency percent per package",
         "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C2 residency percent per package",
         "MetricName": "C2_Pkg_Residency"
     },
     {
-        "BriefDescription": "C3 residency percent per package",
         "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per package",
         "MetricName": "C3_Pkg_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per package",
         "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per package",
         "MetricName": "C6_Pkg_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per package",
         "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per package",
         "MetricName": "C7_Pkg_Residency"
     }
 ]
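
Every TopdownL1 metric above is a fraction of the same SLOTS base (4 * cycles on this 4-wide machine), with Backend_Bound defined as the remainder. A direct transcription of the non-SMT MetricExpr formulas into Python, using hypothetical event counts:

    # Non-SMT TopdownL1 breakdown, transcribed from the MetricExpr fields above.
    def topdown_l1(cycles, idq_uops_not_delivered_core, uops_issued_any,
                   uops_retired_retire_slots, int_misc_recovery_cycles):
        slots = 4 * cycles                                    # the SLOTS metric
        frontend_bound = idq_uops_not_delivered_core / slots
        bad_speculation = (uops_issued_any - uops_retired_retire_slots
                           + 4 * int_misc_recovery_cycles) / slots
        retiring = uops_retired_retire_slots / slots
        backend_bound = 1 - (frontend_bound + bad_speculation + retiring)
        return frontend_bound, bad_speculation, retiring, backend_bound

    fe, bs, ret, be = topdown_l1(cycles=1.0e9,
                                 idq_uops_not_delivered_core=6.0e8,
                                 uops_issued_any=2.9e9,
                                 uops_retired_retire_slots=2.6e9,
                                 int_misc_recovery_cycles=2.0e7)
    print(f"FE {fe:.0%}  BadSpec {bs:.0%}  Retiring {ret:.0%}  BE {be:.0%}")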
index 0b080b0352d84301ebbe8eb2910c0244099373ef..7938bf5689abae4caef47f6a572a14adddfc0872 100644 (file)
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+        "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache.",
         "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x41",
+        "UMask": "0xc1",
         "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
         "SampleAfterValue": "200003",
         "BriefDescription": "Demand Data Read requests that hit L2 cache",
@@ -68,7 +68,7 @@
     {
         "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x42",
+        "UMask": "0xc2",
         "EventName": "L2_RQSTS.RFO_HIT",
         "SampleAfterValue": "200003",
         "BriefDescription": "RFO requests that hit L2 cache.",
@@ -77,7 +77,7 @@
     {
         "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x44",
+        "UMask": "0xc4",
         "EventName": "L2_RQSTS.CODE_RD_HIT",
         "SampleAfterValue": "200003",
         "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
@@ -87,7 +87,7 @@
         "PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types.",
         "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x50",
+        "UMask": "0xd0",
         "EventName": "L2_RQSTS.L2_PF_HIT",
         "SampleAfterValue": "200003",
         "BriefDescription": "L2 prefetch requests that hit L2 cache",
     },
     {
         "PEBS": "1",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
         "EventCode": "0xD0",
         "Counter": "0,1,2,3",
         "UMask": "0x41",
     },
     {
         "PEBS": "1",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
         "EventCode": "0xD0",
         "Counter": "0,1,2,3",
         "UMask": "0x42",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "Counts demand data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads have any response type.",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0000010001 ",
+        "MSRValue": "0x0000010001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts demand data reads that have any response type.",
+        "BriefDescription": "Counts demand data reads have any response type.",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0080020001 ",
+        "MSRValue": "0x0080020001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0100020001 ",
+        "MSRValue": "0x0100020001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0200020001 ",
+        "MSRValue": "0x0200020001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0400020001 ",
+        "MSRValue": "0x0400020001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1000020001 ",
+        "MSRValue": "0x1000020001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f80020001 ",
+        "MSRValue": "0x3F80020001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts demand data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00803c0001 ",
+        "MSRValue": "0x00803C0001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts demand data reads that hit in the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x01003c0001 ",
+        "MSRValue": "0x01003C0001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts demand data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x02003c0001 ",
+        "MSRValue": "0x02003C0001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts demand data reads that hit in the L3 with a snoop miss response.",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0001 ",
+        "MSRValue": "0x04003C0001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c0001 ",
+        "MSRValue": "0x10003C0001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_HITM",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts demand data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0001 ",
+        "MSRValue": "0x3F803C0001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts demand data reads that hit in the L3.",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand data writes (RFOs) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs) have any response type.",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0000010002 ",
+        "MSRValue": "0x0000010002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand data writes (RFOs) that have any response type.",
+        "BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00803c0002 ",
+        "MSRValue": "0x00803C0002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x01003c0002 ",
+        "MSRValue": "0x01003C0002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x02003c0002 ",
+        "MSRValue": "0x02003C0002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 with a snoop miss response.",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0002 ",
+        "MSRValue": "0x04003C0002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c0002 ",
+        "MSRValue": "0x10003C0002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_HITM",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0002 ",
+        "MSRValue": "0x3F803C0002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3.",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads have any response type.",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0000010004 ",
+        "MSRValue": "0x0000010004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand code reads that have any response type.",
+        "BriefDescription": "Counts all demand code reads have any response type.",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0080020004 ",
+        "MSRValue": "0x0080020004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NONE",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0100020004 ",
+        "MSRValue": "0x0100020004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0200020004 ",
+        "MSRValue": "0x0200020004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_MISS",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0400020004 ",
+        "MSRValue": "0x0400020004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1000020004 ",
+        "MSRValue": "0x1000020004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_HITM",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f80020004 ",
+        "MSRValue": "0x3F80020004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00803c0004 ",
+        "MSRValue": "0x00803C0004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand code reads that hit in the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x01003c0004 ",
+        "MSRValue": "0x01003C0004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x02003c0004 ",
+        "MSRValue": "0x02003C0004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand code reads that hit in the L3 with a snoop miss response.",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0004 ",
+        "MSRValue": "0x04003C0004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c0004 ",
+        "MSRValue": "0x10003C0004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_HITM",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0004 ",
+        "MSRValue": "0x3F803C0004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand code reads that hit in the L3.",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts writebacks (modified to exclusive) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive) have any response type.",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0000010008 ",
+        "MSRValue": "0x0000010008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts writebacks (modified to exclusive) that have any response type.",
+        "BriefDescription": "Counts writebacks (modified to exclusive) have any response type.",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0080020008 ",
+        "MSRValue": "0x0080020008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_NONE",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0100020008 ",
+        "MSRValue": "0x0100020008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0200020008 ",
+        "MSRValue": "0x0200020008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_MISS",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0400020008 ",
+        "MSRValue": "0x0400020008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1000020008 ",
+        "MSRValue": "0x1000020008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_HITM",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f80020008 ",
+        "MSRValue": "0x3F80020008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "COREWB & SUPPLIER_NONE & ANY_SNOOP",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00803c0008 ",
+        "MSRValue": "0x00803C0008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x01003c0008 ",
+        "MSRValue": "0x01003C0008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x02003c0008 ",
+        "MSRValue": "0x02003C0008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with a snoop miss response.",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0008 ",
+        "MSRValue": "0x04003C0008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c0008 ",
+        "MSRValue": "0x10003C0008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "COREWB & L3_HIT & SNOOP_HITM",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0008 ",
+        "MSRValue": "0x3F803C0008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3.",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0000010010 ",
+        "MSRValue": "0x0000010010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that have any response type.",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0080020010 ",
+        "MSRValue": "0x0080020010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0100020010 ",
+        "MSRValue": "0x0100020010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0200020010 ",
+        "MSRValue": "0x0200020010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0400020010 ",
+        "MSRValue": "0x0400020010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1000020010 ",
+        "MSRValue": "0x1000020010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f80020010 ",
+        "MSRValue": "0x3F80020010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00803c0010 ",
+        "MSRValue": "0x00803C0010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x01003c0010 ",
+        "MSRValue": "0x01003C0010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x02003c0010 ",
+        "MSRValue": "0x02003C0010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with a snoop miss response.",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0010 ",
+        "MSRValue": "0x04003C0010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c0010 ",
+        "MSRValue": "0x10003C0010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_DATA_RD & L3_HIT & SNOOP_HITM",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0010 ",
+        "MSRValue": "0x3F803C0010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3.",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0000010020 ",
+        "MSRValue": "0x0000010020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type.",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0080020020 ",
+        "MSRValue": "0x0080020020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_NONE",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0100020020 ",
+        "MSRValue": "0x0100020020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0200020020 ",
+        "MSRValue": "0x0200020020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_MISS",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0400020020 ",
+        "MSRValue": "0x0400020020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1000020020 ",
+        "MSRValue": "0x1000020020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_HITM",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f80020020 ",
+        "MSRValue": "0x3F80020020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & ANY_SNOOP",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00803c0020 ",
+        "MSRValue": "0x00803C0020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x01003c0020 ",
+        "MSRValue": "0x01003C0020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x02003c0020 ",
+        "MSRValue": "0x02003C0020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with a snoop miss response.",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0020 ",
+        "MSRValue": "0x04003C0020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c0020 ",
+        "MSRValue": "0x10003C0020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_RFO & L3_HIT & SNOOP_HITM",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0020 ",
+        "MSRValue": "0x3F803C0020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3.",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
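
The MSRValue masks in these entries compose bit-by-bit, which is why only a few hex digits change between neighbouring events: the low 16 bits select the request type (0x20 here for the L2 RFO prefetch), the middle bits the supplier/response (0x3C0000 for an L3 hit), and bits 31 and up the snoop result (0x3F80000000 for ANY_SNOOP). A small sketch that rebuilds the ANY_SNOOP value above; the bit positions are inferred from the MSRValues in this file rather than quoted from a reference, so treat them as assumptions:

#include <stdint.h>
#include <stdio.h>

/* Bit positions inferred from the MSRValues in this file (assumption). */
#define REQ_PF_L2_RFO      (1ULL << 5)	  /* request type: L2 RFO prefetch */
#define RESP_L3_HIT_STATES (0xFULL << 18) /* supplier: line found in the L3 */
#define SNOOP_ANY          (0x7FULL << 31)/* snoop result: any of the 7 bits */

int main(void)
{
	uint64_t msrval = REQ_PF_L2_RFO | RESP_L3_HIT_STATES | SNOOP_ANY;

	/* Prints 0x3f803c0020, matching the ANY_SNOOP entry above. */
	printf("MSRValue = 0x%010llx\n", (unsigned long long)msrval);
	return 0;
}
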
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads have any response type.",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0000010040 ",
+        "MSRValue": "0x0000010040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that have any response type.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads have any response type.",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0080020040 ",
+        "MSRValue": "0x0080020040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_NONE",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0100020040 ",
+        "MSRValue": "0x0100020040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0200020040 ",
+        "MSRValue": "0x0200020040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_MISS",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0400020040 ",
+        "MSRValue": "0x0400020040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1000020040 ",
+        "MSRValue": "0x1000020040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_HITM",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f80020040 ",
+        "MSRValue": "0x3F80020040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00803c0040 ",
+        "MSRValue": "0x00803C0040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x01003c0040 ",
+        "MSRValue": "0x01003C0040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x02003c0040 ",
+        "MSRValue": "0x02003C0040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0040 ",
+        "MSRValue": "0x04003C0040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c0040 ",
+        "MSRValue": "0x10003C0040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_CODE_RD & L3_HIT & SNOOP_HITM",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0040 ",
+        "MSRValue": "0x3F803C0040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0000010080 ",
+        "MSRValue": "0x0000010080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0080020080 ",
+        "MSRValue": "0x0080020080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0100020080 ",
+        "MSRValue": "0x0100020080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0200020080 ",
+        "MSRValue": "0x0200020080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0400020080 ",
+        "MSRValue": "0x0400020080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1000020080 ",
+        "MSRValue": "0x1000020080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f80020080 ",
+        "MSRValue": "0x3F80020080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00803c0080 ",
+        "MSRValue": "0x00803C0080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x01003c0080 ",
+        "MSRValue": "0x01003C0080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x02003c0080 ",
+        "MSRValue": "0x02003C0080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with a snoop miss response.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0080 ",
+        "MSRValue": "0x04003C0080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c0080 ",
+        "MSRValue": "0x10003C0080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_HITM",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0080 ",
+        "MSRValue": "0x3F803C0080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0000010100 ",
+        "MSRValue": "0x0000010100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0080020100 ",
+        "MSRValue": "0x0080020100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NONE",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0100020100 ",
+        "MSRValue": "0x0100020100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0200020100 ",
+        "MSRValue": "0x0200020100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_MISS",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0400020100 ",
+        "MSRValue": "0x0400020100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1000020100 ",
+        "MSRValue": "0x1000020100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_HITM",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f80020100 ",
+        "MSRValue": "0x3F80020100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & ANY_SNOOP",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00803c0100 ",
+        "MSRValue": "0x00803C0100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x01003c0100 ",
+        "MSRValue": "0x01003C0100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x02003c0100 ",
+        "MSRValue": "0x02003C0100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with a snoop miss response.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0100 ",
+        "MSRValue": "0x04003C0100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c0100 ",
+        "MSRValue": "0x10003C0100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_HITM",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0100 ",
+        "MSRValue": "0x3F803C0100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads have any response type.",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0000010200 ",
+        "MSRValue": "0x0000010200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that have any response type.",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads have any response type.",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0080020200 ",
+        "MSRValue": "0x0080020200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_NONE",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0100020200 ",
+        "MSRValue": "0x0100020200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0200020200 ",
+        "MSRValue": "0x0200020200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_MISS",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0400020200 ",
+        "MSRValue": "0x0400020200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1000020200 ",
+        "MSRValue": "0x1000020200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_HITM",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f80020200 ",
+        "MSRValue": "0x3F80020200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00803c0200 ",
+        "MSRValue": "0x00803C0200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x01003c0200 ",
+        "MSRValue": "0x01003C0200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x02003c0200 ",
+        "MSRValue": "0x02003C0200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response.",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0200 ",
+        "MSRValue": "0x04003C0200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c0200 ",
+        "MSRValue": "0x10003C0200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_CODE_RD & L3_HIT & SNOOP_HITM",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0200 ",
+        "MSRValue": "0x3F803C0200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3.",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts any other requests that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests have any response type.",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0000018000 ",
+        "MSRValue": "0x0000018000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts any other requests that have any response type.",
+        "BriefDescription": "Counts any other requests have any response type.",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0080028000 ",
+        "MSRValue": "0x0080028000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NONE",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0100028000 ",
+        "MSRValue": "0x0100028000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0200028000 ",
+        "MSRValue": "0x0200028000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_MISS",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0400028000 ",
+        "MSRValue": "0x0400028000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1000028000 ",
+        "MSRValue": "0x1000028000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_HITM",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f80028000 ",
+        "MSRValue": "0x3F80028000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "OTHER & SUPPLIER_NONE & ANY_SNOOP",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts any other requests that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00803c8000 ",
+        "MSRValue": "0x00803C8000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts any other requests that hit in the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x01003c8000 ",
+        "MSRValue": "0x01003C8000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts any other requests that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x02003c8000 ",
+        "MSRValue": "0x02003C8000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts any other requests that hit in the L3 with a snoop miss response.",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts any other requests that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c8000 ",
+        "MSRValue": "0x04003C8000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts any other requests that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c8000 ",
+        "MSRValue": "0x10003C8000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "OTHER & L3_HIT & SNOOP_HITM",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts any other requests that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c8000 ",
+        "MSRValue": "0x3F803C8000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts any other requests that hit in the L3.",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads have any response type.",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0000010090 ",
+        "MSRValue": "0x0000010090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch data reads that have any response type.",
+        "BriefDescription": "Counts all prefetch data reads have any response type.",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0080020090 ",
+        "MSRValue": "0x0080020090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0100020090 ",
+        "MSRValue": "0x0100020090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0200020090 ",
+        "MSRValue": "0x0200020090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0400020090 ",
+        "MSRValue": "0x0400020090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1000020090 ",
+        "MSRValue": "0x1000020090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f80020090 ",
+        "MSRValue": "0x3F80020090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00803c0090 ",
+        "MSRValue": "0x00803C0090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch data reads that hit in the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x01003c0090 ",
+        "MSRValue": "0x01003C0090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x02003c0090 ",
+        "MSRValue": "0x02003C0090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch data reads that hit in the L3 with a snoop miss response.",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0090 ",
+        "MSRValue": "0x04003C0090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c0090 ",
+        "MSRValue": "0x10003C0090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & SNOOP_HITM",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0090 ",
+        "MSRValue": "0x3F803C0090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch data reads that hit in the L3.",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs have any response type.",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0000010120 ",
+        "MSRValue": "0x0000010120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch RFOs that have any response type.",
+        "BriefDescription": "Counts prefetch RFOs have any response type.",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0080020120 ",
+        "MSRValue": "0x0080020120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_NONE",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0100020120 ",
+        "MSRValue": "0x0100020120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0200020120 ",
+        "MSRValue": "0x0200020120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_MISS",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0400020120 ",
+        "MSRValue": "0x0400020120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1000020120 ",
+        "MSRValue": "0x1000020120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_HITM",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f80020120 ",
+        "MSRValue": "0x3F80020120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & ANY_SNOOP",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00803c0120 ",
+        "MSRValue": "0x00803C0120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch RFOs that hit in the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x01003c0120 ",
+        "MSRValue": "0x01003C0120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x02003c0120 ",
+        "MSRValue": "0x02003C0120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch RFOs that hit in the L3 with a snoop miss response.",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0120 ",
+        "MSRValue": "0x04003C0120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c0120 ",
+        "MSRValue": "0x10003C0120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_RFO & L3_HIT & SNOOP_HITM",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0120 ",
+        "MSRValue": "0x3F803C0120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch RFOs that hit in the L3.",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads have any response type.",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0000010240 ",
+        "MSRValue": "0x0000010240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch code reads that have any response type.",
+        "BriefDescription": "Counts all prefetch code reads have any response type.",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0080020240 ",
+        "MSRValue": "0x0080020240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_NONE",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0100020240 ",
+        "MSRValue": "0x0100020240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0200020240 ",
+        "MSRValue": "0x0200020240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_MISS",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0400020240 ",
+        "MSRValue": "0x0400020240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1000020240 ",
+        "MSRValue": "0x1000020240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_HITM",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f80020240 ",
+        "MSRValue": "0x3F80020240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00803c0240 ",
+        "MSRValue": "0x00803C0240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch code reads that hit in the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x01003c0240 ",
+        "MSRValue": "0x01003C0240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x02003c0240 ",
+        "MSRValue": "0x02003C0240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch code reads that hit in the L3 with a snoop miss response.",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0240 ",
+        "MSRValue": "0x04003C0240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c0240 ",
+        "MSRValue": "0x10003C0240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_CODE_RD & L3_HIT & SNOOP_HITM",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0240 ",
+        "MSRValue": "0x3F803C0240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch code reads that hit in the L3.",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads have any response type.",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0000010091 ",
+        "MSRValue": "0x0000010091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch data reads that have any response type.",
+        "BriefDescription": "Counts all demand & prefetch data reads have any response type.",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0080020091 ",
+        "MSRValue": "0x0080020091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0100020091 ",
+        "MSRValue": "0x0100020091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0200020091 ",
+        "MSRValue": "0x0200020091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0400020091 ",
+        "MSRValue": "0x0400020091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1000020091 ",
+        "MSRValue": "0x1000020091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f80020091 ",
+        "MSRValue": "0x3F80020091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00803c0091 ",
+        "MSRValue": "0x00803C0091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x01003c0091 ",
+        "MSRValue": "0x01003C0091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x02003c0091 ",
+        "MSRValue": "0x02003C0091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 with a snoop miss response.",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0091 ",
+        "MSRValue": "0x04003C0091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c0091 ",
+        "MSRValue": "0x10003C0091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_DATA_RD & L3_HIT & SNOOP_HITM",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0091 ",
+        "MSRValue": "0x3F803C0091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3.",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs have any response type.",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0000010122 ",
+        "MSRValue": "0x0000010122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch RFOs that have any response type.",
+        "BriefDescription": "Counts all demand & prefetch RFOs have any response type.",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0080020122 ",
+        "MSRValue": "0x0080020122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_NONE",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0100020122 ",
+        "MSRValue": "0x0100020122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0200020122 ",
+        "MSRValue": "0x0200020122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_MISS",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0400020122 ",
+        "MSRValue": "0x0400020122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1000020122 ",
+        "MSRValue": "0x1000020122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_HITM",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f80020122 ",
+        "MSRValue": "0x3F80020122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_RFO & SUPPLIER_NONE & ANY_SNOOP",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00803c0122 ",
+        "MSRValue": "0x00803C0122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x01003c0122 ",
+        "MSRValue": "0x01003C0122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x02003c0122 ",
+        "MSRValue": "0x02003C0122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 with a snoop miss response.",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0122 ",
+        "MSRValue": "0x04003C0122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c0122 ",
+        "MSRValue": "0x10003C0122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_RFO & L3_HIT & SNOOP_HITM",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0122 ",
+        "MSRValue": "0x3F803C0122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3.",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     }
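
Note on the OFFCORE_RESPONSE entries above: they all share the same event select/umask pair (EventCode 0xB7 or 0xBB, UMask 0x1); what distinguishes them is the MSRValue, a request-type/supplier/snoop bitmask written to the auxiliary MSR named by MSRIndex (0x1a6 or 0x1a7). A minimal sketch of counting one of them via perf_event_open(2), assuming the usual x86 raw-event encoding (umask << 8 | event) and perf's convention of passing the offcore response mask in config1; the specific MSRValue below is taken from the ALL_DATA_RD.L3_HIT.ANY_SNOOP entry:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x01b7;		/* event 0xB7, umask 0x1: OFFCORE_RESPONSE_0 */
	attr.config1 = 0x3F803C0091ULL;	/* MSRValue: ALL_DATA_RD.L3_HIT.ANY_SNOOP */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ...workload to be measured runs here... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	uint64_t count = 0;
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("offcore responses: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}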
index 689d478dae93bf47300f56a0cc00cd67c7eb242a..15291239c12853750113dec6992b114f82af53b4 100644 (file)
@@ -1,24 +1,26 @@
 [
     {
-        "PublicDescription": "This event counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
+        "PEBS": "1",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
         "EventCode": "0xC1",
         "Counter": "0,1,2,3",
         "UMask": "0x8",
         "Errata": "BDM30",
         "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+        "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable (Precise Event)",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
+        "PEBS": "1",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
         "EventCode": "0xC1",
         "Counter": "0,1,2,3",
         "UMask": "0x10",
         "Errata": "BDM30",
         "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+        "BriefDescription": "Number of transitions from legacy SSE to AVX-256 when penalty applicable (Precise Event)",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
@@ -45,7 +47,7 @@
         "UMask": "0x3",
         "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+        "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. (RSQRT for single precision?)",
         "CounterHTOff": "0,1,2,3"
     },
     {
@@ -54,7 +56,7 @@
         "UMask": "0x4",
         "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired.  Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+        "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired.  Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
         "CounterHTOff": "0,1,2,3"
     },
     {
@@ -63,7 +65,7 @@
         "UMask": "0x8",
         "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired.  Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+        "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired.  Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
         "CounterHTOff": "0,1,2,3"
     },
     {
@@ -72,7 +74,7 @@
         "UMask": "0x10",
         "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired.  Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+        "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired.  Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
         "CounterHTOff": "0,1,2,3"
     },
     {
@@ -81,7 +83,7 @@
         "UMask": "0x15",
         "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
         "SampleAfterValue": "2000006",
-        "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.  ?.",
+        "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
         "CounterHTOff": "0,1,2,3"
     },
     {
@@ -90,7 +92,7 @@
         "UMask": "0x20",
         "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired.  Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+        "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired.  Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "UMask": "0x2a",
         "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
         "SampleAfterValue": "2000005",
-        "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+        "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "UMask": "0x3c",
         "EventName": "FP_ARITH_INST_RETIRED.PACKED",
         "SampleAfterValue": "2000004",
-        "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+        "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. (RSQRT for single-precision?)",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
+        "PEBS": "1",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
         "EventCode": "0xCA",
         "Counter": "0,1,2,3",
         "UMask": "0x2",
         "EventName": "FP_ASSIST.X87_OUTPUT",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Number of X87 assists due to output value.",
+        "BriefDescription": "output - Numeric Overflow, Numeric Underflow, Inexact Result  (Precise Event)",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
+        "PEBS": "1",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
         "EventCode": "0xCA",
         "Counter": "0,1,2,3",
         "UMask": "0x4",
         "EventName": "FP_ASSIST.X87_INPUT",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Number of X87 assists due to input value.",
+        "BriefDescription": "input - Invalid Operation, Denormal Operand, SNaN Operand  (Precise Event)",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
+        "PEBS": "1",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
         "EventCode": "0xCA",
         "Counter": "0,1,2,3",
         "UMask": "0x8",
         "EventName": "FP_ASSIST.SIMD_OUTPUT",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Number of SIMD FP assists due to Output values",
+        "BriefDescription": "SSE* FP micro-code assist when output value is invalid. (Precise Event)",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
+        "PEBS": "1",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts any input SSE* floating-point (FP) assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
         "EventCode": "0xCA",
         "Counter": "0,1,2,3",
         "UMask": "0x10",
         "EventName": "FP_ASSIST.SIMD_INPUT",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Number of SIMD FP assists due to input values",
+        "BriefDescription": "Any input SSE* FP Assist -   (Precise Event)",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
+        "PEBS": "1",
+        "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1. Uses PEBS.",
         "EventCode": "0xCA",
         "Counter": "0,1,2,3",
         "UMask": "0x1e",
         "EventName": "FP_ASSIST.ANY",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Cycles with any input/output SSE or FP assist",
+        "BriefDescription": "Counts any FP_ASSIST umask was incrementing   (Precise Event)",
         "CounterMask": "1",
         "CounterHTOff": "0,1,2,3"
     }
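
Note on the "PEBS": "1" markers added in this file: they flag events that support precise sampling, which in perf_event_open(2) terms corresponds to a non-zero precise_ip (the :p/:pp event modifiers). A minimal sketch, assuming the OTHER_ASSISTS.AVX_TO_SSE encoding implied by the JSON fields above (event 0xC1, umask 0x8) and its SampleAfterValue as the period:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = (0x08ULL << 8) | 0xc1;	/* OTHER_ASSISTS.AVX_TO_SSE */
	attr.sample_period = 100003;		/* SampleAfterValue from the JSON */
	attr.precise_ip = 1;			/* request PEBS, i.e. ":p" */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		perror("perf_event_open (PEBS may be unavailable here)");
	else
		puts("AVX_TO_SSE opened as a precise event");
	return 0;
}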
index 7142c76d7f11512ff05f3072d1d45fa97cf328b2..aa4a5d762f212bf5481eab7f78a6d714bd19e406 100644 (file)
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4  x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
+        "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding \u201c4 \u2013 x\u201d when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
         "EventCode": "0x9C",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
+        "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0\u20132 cycles.",
         "EventCode": "0xAB",
         "Counter": "0,1,2,3",
         "UMask": "0x2",
index c9154cebbdf0c060f8149ddec10bfa1bb34879d8..b6b5247d3d5a79b4dd8c17346a82b0a202d9d7c5 100644 (file)
     },
     {
         "PEBS": "2",
-        "PublicDescription": "This event counts loads with latency value being above four.",
+        "PublicDescription": "Counts randomly selected loads with latency value being above four.",
         "EventCode": "0xCD",
         "MSRValue": "0x4",
         "Counter": "3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Loads with latency value being above 4",
+        "BriefDescription": "Randomly selected loads with latency value being above 4",
         "TakenAlone": "1",
         "CounterHTOff": "3"
     },
     {
         "PEBS": "2",
-        "PublicDescription": "This event counts loads with latency value being above eight.",
+        "PublicDescription": "Counts randomly selected loads with latency value being above eight.",
         "EventCode": "0xCD",
         "MSRValue": "0x8",
         "Counter": "3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "50021",
-        "BriefDescription": "Loads with latency value being above 8",
+        "BriefDescription": "Randomly selected loads with latency value being above 8",
         "TakenAlone": "1",
         "CounterHTOff": "3"
     },
     {
         "PEBS": "2",
-        "PublicDescription": "This event counts loads with latency value being above 16.",
+        "PublicDescription": "Counts randomly selected loads with latency value being above 16.",
         "EventCode": "0xCD",
         "MSRValue": "0x10",
         "Counter": "3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "20011",
-        "BriefDescription": "Loads with latency value being above 16",
+        "BriefDescription": "Randomly selected loads with latency value being above 16",
         "TakenAlone": "1",
         "CounterHTOff": "3"
     },
     {
         "PEBS": "2",
-        "PublicDescription": "This event counts loads with latency value being above 32.",
+        "PublicDescription": "Counts randomly selected loads with latency value being above 32.",
         "EventCode": "0xCD",
         "MSRValue": "0x20",
         "Counter": "3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Loads with latency value being above 32",
+        "BriefDescription": "Randomly selected loads with latency value being above 32",
         "TakenAlone": "1",
         "CounterHTOff": "3"
     },
     {
         "PEBS": "2",
-        "PublicDescription": "This event counts loads with latency value being above 64.",
+        "PublicDescription": "Counts randomly selected loads with latency value being above 64.",
         "EventCode": "0xCD",
         "MSRValue": "0x40",
         "Counter": "3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "2003",
-        "BriefDescription": "Loads with latency value being above 64",
+        "BriefDescription": "Randomly selected loads with latency value being above 64",
         "TakenAlone": "1",
         "CounterHTOff": "3"
     },
     {
         "PEBS": "2",
-        "PublicDescription": "This event counts loads with latency value being above 128.",
+        "PublicDescription": "Counts randomly selected loads with latency value being above 128.",
         "EventCode": "0xCD",
         "MSRValue": "0x80",
         "Counter": "3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "1009",
-        "BriefDescription": "Loads with latency value being above 128",
+        "BriefDescription": "Randomly selected loads with latency value being above 128",
         "TakenAlone": "1",
         "CounterHTOff": "3"
     },
     {
         "PEBS": "2",
-        "PublicDescription": "This event counts loads with latency value being above 256.",
+        "PublicDescription": "Counts randomly selected loads with latency value being above 256.",
         "EventCode": "0xCD",
         "MSRValue": "0x100",
         "Counter": "3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "503",
-        "BriefDescription": "Loads with latency value being above 256",
+        "BriefDescription": "Randomly selected loads with latency value being above 256",
         "TakenAlone": "1",
         "CounterHTOff": "3"
     },
     {
         "PEBS": "2",
-        "PublicDescription": "This event counts loads with latency value being above 512.",
+        "PublicDescription": "Counts randomly selected loads with latency value being above 512.",
         "EventCode": "0xCD",
         "MSRValue": "0x200",
         "Counter": "3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "101",
-        "BriefDescription": "Loads with latency value being above 512",
+        "BriefDescription": "Randomly selected loads with latency value being above 512",
         "TakenAlone": "1",
         "CounterHTOff": "3"
     },
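
Note on the MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* entries above: they are all one PEBS event (EventCode 0xCD) parameterized by the latency threshold held in MSR 0x3F6, which perf exposes as config1 (the ldlat format field). A minimal sketch of arming the GT_32 variant, assuming umask 0x1 for the event (the umask is not shown in the hunks above) and the GT_32 SampleAfterValue as the period:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = (0x01ULL << 8) | 0xcd;	/* LOAD_LATENCY (umask assumed 0x1) */
	attr.config1 = 0x20;			/* ldlat: latency threshold above 32 cycles */
	attr.sample_period = 100007;		/* SampleAfterValue for GT_32 */
	attr.precise_ip = 2;			/* "PEBS": "2" - precise level 2 */
	attr.sample_type = PERF_SAMPLE_ADDR | PERF_SAMPLE_WEIGHT;
	attr.disabled = 1;

	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		perror("perf_event_open");
	else
		puts("load-latency event armed at a 32-cycle threshold");
	return 0;
}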
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2000020001 ",
+        "MSRValue": "0x2000020001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts demand data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x20003c0001 ",
+        "MSRValue": "0x20003C0001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts demand data reads that hit in the L3 and the target was non-DRAM system address.",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0084000001 ",
+        "MSRValue": "0x0084000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0104000001 ",
+        "MSRValue": "0x0104000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0204000001 ",
+        "MSRValue": "0x0204000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0404000001 ",
+        "MSRValue": "0x0404000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1004000001 ",
+        "MSRValue": "0x1004000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2004000001 ",
+        "MSRValue": "0x2004000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f84000001 ",
+        "MSRValue": "0x3F84000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts demand data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00bc000001 ",
+        "MSRValue": "0x00BC000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts demand data reads that miss the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x013c000001 ",
+        "MSRValue": "0x013C000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts demand data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x023c000001 ",
+        "MSRValue": "0x023C000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts demand data reads that miss the L3 with a snoop miss response.",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x043c000001 ",
+        "MSRValue": "0x043C000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x20003c0002 ",
+        "MSRValue": "0x20003C0002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the target was non-DRAM system address.",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f84000002 ",
+        "MSRValue": "0x3F84000002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00bc000002 ",
+        "MSRValue": "0x00BC000002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x013c000002 ",
+        "MSRValue": "0x013C000002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x023c000002 ",
+        "MSRValue": "0x023C000002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 with a snoop miss response.",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x043c000002 ",
+        "MSRValue": "0x043C000002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2000020004 ",
+        "MSRValue": "0x2000020004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x20003c0004 ",
+        "MSRValue": "0x20003C0004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand code reads that hit in the L3 and the target was non-DRAM system address.",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0084000004 ",
+        "MSRValue": "0x0084000004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0104000004 ",
+        "MSRValue": "0x0104000004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0204000004 ",
+        "MSRValue": "0x0204000004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0404000004 ",
+        "MSRValue": "0x0404000004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1004000004 ",
+        "MSRValue": "0x1004000004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2004000004 ",
+        "MSRValue": "0x2004000004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f84000004 ",
+        "MSRValue": "0x3F84000004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00bc000004 ",
+        "MSRValue": "0x00BC000004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand code reads that miss the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x013c000004 ",
+        "MSRValue": "0x013C000004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x023c000004 ",
+        "MSRValue": "0x023C000004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand code reads that miss the L3 with a snoop miss response.",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x043c000004 ",
+        "MSRValue": "0x043C000004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all demand code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2000020008 ",
+        "MSRValue": "0x2000020008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x20003c0008 ",
+        "MSRValue": "0x20003C0008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the target was non-DRAM system address.",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0084000008 ",
+        "MSRValue": "0x0084000008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0104000008 ",
+        "MSRValue": "0x0104000008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0204000008 ",
+        "MSRValue": "0x0204000008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0404000008 ",
+        "MSRValue": "0x0404000008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1004000008 ",
+        "MSRValue": "0x1004000008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2004000008 ",
+        "MSRValue": "0x2004000008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f84000008 ",
+        "MSRValue": "0x3F84000008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts writebacks (modified to exclusive) that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00bc000008 ",
+        "MSRValue": "0x00BC000008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts writebacks (modified to exclusive) that miss the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x013c000008 ",
+        "MSRValue": "0x013C000008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "COREWB & L3_MISS & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts writebacks (modified to exclusive) that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x023c000008 ",
+        "MSRValue": "0x023C000008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts writebacks (modified to exclusive) that miss the L3 with a snoop miss response.",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts writebacks (modified to exclusive)",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x043c000008 ",
+        "MSRValue": "0x043C000008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "COREWB & L3_MISS & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts writebacks (modified to exclusive)",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2000020010 ",
+        "MSRValue": "0x2000020010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x20003c0010 ",
+        "MSRValue": "0x20003C0010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the target was non-DRAM system address.",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0084000010 ",
+        "MSRValue": "0x0084000010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0104000010 ",
+        "MSRValue": "0x0104000010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0204000010 ",
+        "MSRValue": "0x0204000010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0404000010 ",
+        "MSRValue": "0x0404000010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1004000010 ",
+        "MSRValue": "0x1004000010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2004000010 ",
+        "MSRValue": "0x2004000010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f84000010 ",
+        "MSRValue": "0x3F84000010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00bc000010 ",
+        "MSRValue": "0x00BC000010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x013c000010 ",
+        "MSRValue": "0x013C000010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x023c000010 ",
+        "MSRValue": "0x023C000010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with a snoop miss response.",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x043c000010 ",
+        "MSRValue": "0x043C000010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2000020020 ",
+        "MSRValue": "0x2000020020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x20003c0020 ",
+        "MSRValue": "0x20003C0020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the target was non-DRAM system address.",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0084000020 ",
+        "MSRValue": "0x0084000020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0104000020 ",
+        "MSRValue": "0x0104000020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0204000020 ",
+        "MSRValue": "0x0204000020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0404000020 ",
+        "MSRValue": "0x0404000020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1004000020 ",
+        "MSRValue": "0x1004000020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2004000020 ",
+        "MSRValue": "0x2004000020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f84000020 ",
+        "MSRValue": "0x3F84000020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00bc000020 ",
+        "MSRValue": "0x00BC000020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x013c000020 ",
+        "MSRValue": "0x013C000020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x023c000020 ",
+        "MSRValue": "0x023C000020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with a snoop miss response.",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x043c000020 ",
+        "MSRValue": "0x043C000020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2000020040 ",
+        "MSRValue": "0x2000020040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x20003c0040 ",
+        "MSRValue": "0x20003C0040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0084000040 ",
+        "MSRValue": "0x0084000040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0104000040 ",
+        "MSRValue": "0x0104000040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0204000040 ",
+        "MSRValue": "0x0204000040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0404000040 ",
+        "MSRValue": "0x0404000040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1004000040 ",
+        "MSRValue": "0x1004000040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2004000040 ",
+        "MSRValue": "0x2004000040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f84000040 ",
+        "MSRValue": "0x3F84000040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00bc000040 ",
+        "MSRValue": "0x00BC000040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x013c000040 ",
+        "MSRValue": "0x013C000040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x023c000040 ",
+        "MSRValue": "0x023C000040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x043c000040 ",
+        "MSRValue": "0x043C000040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L2_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2000020080 ",
+        "MSRValue": "0x2000020080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x20003c0080 ",
+        "MSRValue": "0x20003C0080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the target was non-DRAM system address.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0084000080 ",
+        "MSRValue": "0x0084000080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0104000080 ",
+        "MSRValue": "0x0104000080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0204000080 ",
+        "MSRValue": "0x0204000080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0404000080 ",
+        "MSRValue": "0x0404000080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1004000080 ",
+        "MSRValue": "0x1004000080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2004000080 ",
+        "MSRValue": "0x2004000080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f84000080 ",
+        "MSRValue": "0x3F84000080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00bc000080 ",
+        "MSRValue": "0x00BC000080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x013c000080 ",
+        "MSRValue": "0x013C000080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x023c000080 ",
+        "MSRValue": "0x023C000080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with a snoop miss response.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x043c000080 ",
+        "MSRValue": "0x043C000080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2000020100 ",
+        "MSRValue": "0x2000020100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x20003c0100 ",
+        "MSRValue": "0x20003C0100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the target was non-DRAM system address.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0084000100 ",
+        "MSRValue": "0x0084000100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0104000100 ",
+        "MSRValue": "0x0104000100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0204000100 ",
+        "MSRValue": "0x0204000100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0404000100 ",
+        "MSRValue": "0x0404000100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1004000100 ",
+        "MSRValue": "0x1004000100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2004000100 ",
+        "MSRValue": "0x2004000100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f84000100 ",
+        "MSRValue": "0x3F84000100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00bc000100 ",
+        "MSRValue": "0x00BC000100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x013c000100 ",
+        "MSRValue": "0x013C000100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x023c000100 ",
+        "MSRValue": "0x023C000100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with a snoop miss response.",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x043c000100 ",
+        "MSRValue": "0x043C000100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2000020200 ",
+        "MSRValue": "0x2000020200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x20003c0200 ",
+        "MSRValue": "0x20003C0200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address.",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0084000200 ",
+        "MSRValue": "0x0084000200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0104000200 ",
+        "MSRValue": "0x0104000200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0204000200 ",
+        "MSRValue": "0x0204000200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0404000200 ",
+        "MSRValue": "0x0404000200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1004000200 ",
+        "MSRValue": "0x1004000200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2004000200 ",
+        "MSRValue": "0x2004000200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f84000200 ",
+        "MSRValue": "0x3F84000200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00bc000200 ",
+        "MSRValue": "0x00BC000200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x013c000200 ",
+        "MSRValue": "0x013C000200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x023c000200 ",
+        "MSRValue": "0x023C000200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response.",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x043c000200 ",
+        "MSRValue": "0x043C000200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "PF_L3_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2000028000 ",
+        "MSRValue": "0x2000028000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts any other requests that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x20003c8000 ",
+        "MSRValue": "0x20003C8000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts any other requests that hit in the L3 and the target was non-DRAM system address.",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0084008000 ",
+        "MSRValue": "0x0084008000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0104008000 ",
+        "MSRValue": "0x0104008000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0204008000 ",
+        "MSRValue": "0x0204008000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0404008000 ",
+        "MSRValue": "0x0404008000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1004008000 ",
+        "MSRValue": "0x1004008000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2004008000 ",
+        "MSRValue": "0x2004008000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f84008000 ",
+        "MSRValue": "0x3F84008000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts any other requests that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00bc008000 ",
+        "MSRValue": "0x00BC008000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts any other requests that miss the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x013c008000 ",
+        "MSRValue": "0x013C008000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "OTHER & L3_MISS & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts any other requests that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x023c008000 ",
+        "MSRValue": "0x023C008000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts any other requests that miss the L3 with a snoop miss response.",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x043c008000 ",
+        "MSRValue": "0x043C008000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "OTHER & L3_MISS & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2000020090 ",
+        "MSRValue": "0x2000020090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x20003c0090 ",
+        "MSRValue": "0x20003C0090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch data reads that hit in the L3 and the target was non-DRAM system address.",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0084000090 ",
+        "MSRValue": "0x0084000090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0104000090 ",
+        "MSRValue": "0x0104000090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0204000090 ",
+        "MSRValue": "0x0204000090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0404000090 ",
+        "MSRValue": "0x0404000090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1004000090 ",
+        "MSRValue": "0x1004000090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2004000090 ",
+        "MSRValue": "0x2004000090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f84000090 ",
+        "MSRValue": "0x3F84000090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00bc000090 ",
+        "MSRValue": "0x00BC000090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch data reads that miss the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x013c000090 ",
+        "MSRValue": "0x013C000090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x023c000090 ",
+        "MSRValue": "0x023C000090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch data reads that miss the L3 with a snoop miss response.",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x043c000090 ",
+        "MSRValue": "0x043C000090",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2000020120 ",
+        "MSRValue": "0x2000020120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x20003c0120 ",
+        "MSRValue": "0x20003C0120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch RFOs that hit in the L3 and the target was non-DRAM system address.",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0084000120 ",
+        "MSRValue": "0x0084000120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0104000120 ",
+        "MSRValue": "0x0104000120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0204000120 ",
+        "MSRValue": "0x0204000120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0404000120 ",
+        "MSRValue": "0x0404000120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1004000120 ",
+        "MSRValue": "0x1004000120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2004000120 ",
+        "MSRValue": "0x2004000120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f84000120 ",
+        "MSRValue": "0x3F84000120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00bc000120 ",
+        "MSRValue": "0x00BC000120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch RFOs that miss the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x013c000120 ",
+        "MSRValue": "0x013C000120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x023c000120 ",
+        "MSRValue": "0x023C000120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch RFOs that miss the L3 with a snoop miss response.",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x043c000120 ",
+        "MSRValue": "0x043C000120",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2000020240 ",
+        "MSRValue": "0x2000020240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x20003c0240 ",
+        "MSRValue": "0x20003C0240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch code reads that hit in the L3 and the target was non-DRAM system address.",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0084000240 ",
+        "MSRValue": "0x0084000240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0104000240 ",
+        "MSRValue": "0x0104000240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0204000240 ",
+        "MSRValue": "0x0204000240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0404000240 ",
+        "MSRValue": "0x0404000240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1004000240 ",
+        "MSRValue": "0x1004000240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2004000240 ",
+        "MSRValue": "0x2004000240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f84000240 ",
+        "MSRValue": "0x3F84000240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00bc000240 ",
+        "MSRValue": "0x00BC000240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch code reads that miss the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x013c000240 ",
+        "MSRValue": "0x013C000240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x023c000240 ",
+        "MSRValue": "0x023C000240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch code reads that miss the L3 with a snoop miss response.",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch code reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x043c000240 ",
+        "MSRValue": "0x043C000240",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_PF_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all prefetch code reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2000020091 ",
+        "MSRValue": "0x2000020091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x20003c0091 ",
+        "MSRValue": "0x20003C0091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the target was non-DRAM system address.",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0084000091 ",
+        "MSRValue": "0x0084000091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0104000091 ",
+        "MSRValue": "0x0104000091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0204000091 ",
+        "MSRValue": "0x0204000091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0404000091 ",
+        "MSRValue": "0x0404000091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1004000091 ",
+        "MSRValue": "0x1004000091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2004000091 ",
+        "MSRValue": "0x2004000091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f84000091 ",
+        "MSRValue": "0x3F84000091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00bc000091 ",
+        "MSRValue": "0x00BC000091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x013c000091 ",
+        "MSRValue": "0x013C000091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x023c000091 ",
+        "MSRValue": "0x023C000091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 with a snoop miss response.",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x043c000091 ",
+        "MSRValue": "0x043C000091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all demand & prefetch data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2000020122 ",
+        "MSRValue": "0x2000020122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x20003c0122 ",
+        "MSRValue": "0x20003C0122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the target was non-DRAM system address.",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0084000122 ",
+        "MSRValue": "0x0084000122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0104000122 ",
+        "MSRValue": "0x0104000122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0204000122 ",
+        "MSRValue": "0x0204000122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0404000122 ",
+        "MSRValue": "0x0404000122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1004000122 ",
+        "MSRValue": "0x1004000122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x2004000122 ",
+        "MSRValue": "0x2004000122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f84000122 ",
+        "MSRValue": "0x3F84000122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00bc000122 ",
+        "MSRValue": "0x00BC000122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 with no details on snoop-related information.",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x013c000122 ",
+        "MSRValue": "0x013C000122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x023c000122 ",
+        "MSRValue": "0x023C000122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 with a snoop miss response.",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x043c000122 ",
+        "MSRValue": "0x043C000122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "ALL_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts all demand & prefetch RFOs",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     }
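
The OFFCORE_RESPONSE entries above pair event codes 0xB7/0xBB with the two offcore-response MSRs (MSRIndex 0x1a6/0x1a7) and a 64-bit MSRValue that encodes the request-type and response/snoop filters. A minimal sketch of how such a value decomposes, assuming the usual SDM-style layout (request-type bits in the low word, supplier/snoop attributes above it; field names here are illustrative, not perf's API):

# Sketch: splitting an offcore-response MSRValue into its filter fields.
# Bit boundaries are an assumption based on the common layout (request
# type in bits 0-15, response/snoop attributes above); verify against
# the SDM for a given core before relying on them.
def split_offcore_rsp(msr_value: int) -> dict:
    return {
        "request_type": msr_value & 0xFFFF,   # 0x0122 here = demand & prefetch RFOs
        "response_attrs": msr_value >> 16,    # supplier + snoop filter bits
    }

fields = split_offcore_rsp(0x3F84000122)      # ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP
print({k: hex(v) for k, v in fields.items()})
# {'request_type': '0x122', 'response_attrs': '0x3f8400'}
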
index 999cf30663639b627ebcf0204f589cd27738fd1e..bb25574b8d212f5eded019d059c6ba30ef2e958b 100644 (file)
@@ -1,7 +1,6 @@
 [
     {
         "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 0",
         "UMask": "0x1",
         "EventName": "INST_RETIRED.ANY",
@@ -11,7 +10,6 @@
     },
     {
         "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 1",
         "UMask": "0x2",
         "EventName": "CPU_CLK_UNHALTED.THREAD",
@@ -20,7 +18,6 @@
         "CounterHTOff": "Fixed counter 1"
     },
     {
-        "EventCode": "0x00",
         "Counter": "Fixed counter 1",
         "UMask": "0x2",
         "AnyThread": "1",
@@ -31,7 +28,6 @@
     },
     {
         "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'.  This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'.  After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 2",
         "UMask": "0x3",
         "EventName": "CPU_CLK_UNHALTED.REF_TSC",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
+        "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
         "EventCode": "0x87",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts resource-related stall cycles. Reasons for stalls can be as follows:\n - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots)\n - *any* u-arch structure got empty (like INT/SIMD FreeLists)\n - FPU control word (FPCW), MXCSR\nand others. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
-        "EventCode": "0xA2",
+        "PublicDescription": "This event counts resource-related stall cycles.",
+        "EventCode": "0xa2",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "RESOURCE_STALLS.ANY",
         "CounterHTOff": "2"
     },
     {
+        "PublicDescription": "Number of Uops delivered by the LSD.",
         "EventCode": "0xA8",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "CounterHTOff": "1"
     },
     {
-        "PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+        "PEBS": "1",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
         "EventCode": "0xC0",
         "Counter": "0,1,2,3",
         "UMask": "0x2",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
+        "PEBS": "1",
         "EventCode": "0xC1",
         "Counter": "0,1,2,3",
         "UMask": "0x40",
         "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "Data_LA": "1"
     },
     {
-        "PublicDescription": "This event counts cycles without actually retired uops.",
+        "PEBS": "1",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts cycles without actually retired uops.",
         "EventCode": "0xC2",
         "Invert": "1",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "UOPS_RETIRED.STALL_CYCLES",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles without actually retired uops.",
+        "BriefDescription": "Cycles no executable uops retired (Precise Event)",
         "CounterMask": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+        "PEBS": "1",
+        "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to  PEBS uops retired event.",
         "EventCode": "0xC2",
         "Invert": "1",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles with less than 10 actually retired uops.",
+        "BriefDescription": "Number of cycles using always true condition applied to  PEBS uops retired event.",
         "CounterMask": "10",
         "CounterHTOff": "0,1,2,3"
     },
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts not taken branch instructions retired.",
+        "PEBS": "1",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts not taken branch instructions retired.",
         "EventCode": "0xC4",
         "Counter": "0,1,2,3",
         "UMask": "0x10",
         "EventName": "BR_INST_RETIRED.NOT_TAKEN",
         "SampleAfterValue": "400009",
-        "BriefDescription": "Not taken branch instructions retired.",
+        "BriefDescription": "Counts all not taken macro branch instructions retired. (Precise Event)",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts far branch instructions retired.",
+        "PEBS": "1",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts far branch instructions retired.",
         "EventCode": "0xC4",
         "Counter": "0,1,2,3",
         "UMask": "0x40",
         "Errata": "BDW98",
         "EventName": "BR_INST_RETIRED.FAR_BRANCH",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Far branch instructions retired.",
+        "BriefDescription": "Counts the number of far branch instructions retired.(Precise Event)",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
index 4ad425312bdc4744880ef420eed7a686e54780c9..bf243fe2a0ec3c649c88d619d7a82a7aec63828a 100644 (file)
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
         "SampleAfterValue": "100003",
         "L1_Hit_Indication": "1",
         "CounterHTOff": "0,1,2,3"
index 0d04bf9db0008b8f46e814be69320eb9244023ea..e2f0540625a240ac986ba54a07ed477ae37c531f 100644 (file)
@@ -1,6 +1,5 @@
 [
     {
-        "EventCode": "0x00",
         "UMask": "0x1",
         "BriefDescription": "Instructions retired from execution.",
         "Counter": "Fixed counter 0",
@@ -10,7 +9,6 @@
         "CounterHTOff": "Fixed counter 0"
     },
     {
-        "EventCode": "0x00",
         "UMask": "0x2",
         "BriefDescription": "Core cycles when the thread is not in halt state",
         "Counter": "Fixed counter 1",
@@ -20,7 +18,6 @@
         "CounterHTOff": "Fixed counter 1"
     },
     {
-        "EventCode": "0x00",
         "UMask": "0x2",
         "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
         "Counter": "Fixed counter 1",
@@ -30,7 +27,6 @@
         "CounterHTOff": "Fixed counter 1"
     },
     {
-        "EventCode": "0x00",
         "UMask": "0x3",
         "BriefDescription": "Reference cycles when the core is not in halt state.",
         "Counter": "Fixed counter 2",
         "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
         "Counter": "0,1,2,3",
         "EventName": "ILD_STALL.LCP",
-        "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
+        "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
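
The fixed-counter events described above (INST_RETIRED.ANY on fixed counter 0, CPU_CLK_UNHALTED.THREAD on fixed counter 1) are the inputs to the IPC and CPI ratio metrics defined in the metrics file further below. A toy illustration with made-up counter readings:

# Toy numbers only: IPC and CPI are plain ratios over the two fixed
# counters described above (see the IPC/CPI metrics further below).
inst_retired_any = 8_000_000_000          # fixed counter 0
cpu_clk_unhalted_thread = 5_000_000_000   # fixed counter 1

ipc = inst_retired_any / cpu_clk_unhalted_thread
cpi = 1 / ipc
print("IPC=%.2f CPI=%.3f" % (ipc, cpi))   # IPC=1.60 CPI=0.625
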
index 5a7f1ec2420048b99f5cc8561b5efa03fd71b478..c6f9762f32c06e817c0df87be6a9f06fd54013dc 100644 (file)
 [
     {
-        "BriefDescription": "Instructions Per Cycle (per logical thread)",
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Frontend_Bound"
+    },
+    {
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Frontend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Bad_Speculation"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Bad_Speculation_SMT"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Backend_Bound"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Backend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Retiring"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Retiring_SMT"
+    },
+    {
         "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Instructions Per Cycle (per logical thread)",
         "MetricGroup": "TopDownL1",
         "MetricName": "IPC"
     },
     {
-        "BriefDescription": "Uops Per Instruction",
         "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
-        "MetricGroup": "Pipeline",
+        "BriefDescription": "Uops Per Instruction",
+        "MetricGroup": "Pipeline;Retiring",
         "MetricName": "UPI"
     },
     {
-        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+        "BriefDescription": "Instruction per taken branch",
+        "MetricGroup": "Branches;PGO",
+        "MetricName": "IpTB"
+    },
+    {
+        "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+        "BriefDescription": "Branch instructions per taken branch. ",
+        "MetricGroup": "Branches;PGO",
+        "MetricName": "BpTB"
+    },
+    {
         "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
-        "MetricGroup": "Frontend",
+        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+        "MetricGroup": "PGO",
         "MetricName": "IFetch_Line_Utilization"
     },
     {
-        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
-        "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
-        "MetricGroup": "DSB; Frontend_Bandwidth",
+        "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+        "MetricGroup": "DSB;Frontend_Bandwidth",
         "MetricName": "DSB_Coverage"
     },
     {
-        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricGroup": "Pipeline;Summary",
         "MetricName": "CPI"
     },
     {
-        "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
         "MetricGroup": "Summary",
         "MetricName": "CLKS"
     },
     {
-        "BriefDescription": "Total issue-pipeline slots",
-        "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+        "MetricExpr": "4 * cycles",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
         "MetricGroup": "TopDownL1",
         "MetricName": "SLOTS"
     },
     {
-        "BriefDescription": "Total number of retired Instructions",
+        "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
+        "MetricGroup": "TopDownL1_SMT",
+        "MetricName": "SLOTS_SMT"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+        "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+        "MetricGroup": "Instruction_Type;L1_Bound",
+        "MetricName": "IpL"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+        "BriefDescription": "Instructions per Store",
+        "MetricGroup": "Instruction_Type;Store_Bound",
+        "MetricName": "IpS"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Instructions per Branch",
+        "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+        "MetricName": "IpB"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+        "BriefDescription": "Instruction per (near) call",
+        "MetricGroup": "Branches",
+        "MetricName": "IpCall"
+    },
+    {
         "MetricExpr": "INST_RETIRED.ANY",
+        "BriefDescription": "Total number of retired Instructions",
         "MetricGroup": "Summary",
         "MetricName": "Instructions"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / cycles",
         "BriefDescription": "Instructions Per Cycle (per physical core)",
-        "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
         "MetricGroup": "SMT",
         "MetricName": "CoreIPC"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Instructions Per Cycle (per physical core)",
+        "MetricGroup": "SMT",
+        "MetricName": "CoreIPC_SMT"
+    },
+    {
+        "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
+        "BriefDescription": "Floating Point Operations Per Cycle",
+        "MetricGroup": "FLOPS",
+        "MetricName": "FLOPc"
+    },
+    {
+        "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Floating Point Operations Per Cycle",
+        "MetricGroup": "FLOPS_SMT",
+        "MetricName": "FLOPc_SMT"
+    },
+    {
+        "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
         "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
-        "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
         "MetricGroup": "Pipeline;Ports_Utilization",
         "MetricName": "ILP"
     },
     {
-        "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
-        "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL  - (( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED )) ) / RS_EVENTS.EMPTY_END)",
-        "MetricGroup": "Unknown_Branches",
-        "MetricName": "BAClear_Cost"
+        "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+        "MetricGroup": "Branch_Mispredicts",
+        "MetricName": "Branch_Misprediction_Cost"
+    },
+    {
+        "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+        "MetricGroup": "Branch_Mispredicts_SMT",
+        "MetricName": "Branch_Misprediction_Cost_SMT"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+        "MetricGroup": "Branch_Mispredicts",
+        "MetricName": "IpMispredict"
     },
     {
+        "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
         "BriefDescription": "Core actual clocks when any thread is active on the physical core",
-        "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
         "MetricGroup": "SMT",
         "MetricName": "CORE_CLKS"
     },
     {
-        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
         "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
         "MetricGroup": "Memory_Bound;Memory_Lat",
         "MetricName": "Load_Miss_Real_Latency"
     },
     {
-        "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
-        "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+        "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+        "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
         "MetricGroup": "Memory_Bound;Memory_BW",
         "MetricName": "MLP"
     },
     {
+        "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / ( 2 * cycles )",
         "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
-        "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED) ) / (2*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles))",
         "MetricGroup": "TLB",
         "MetricName": "Page_Walks_Utilization"
     },
     {
-        "BriefDescription": "Average CPU Utilization",
+        "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
+        "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+        "MetricGroup": "TLB_SMT",
+        "MetricName": "Page_Walks_Utilization_SMT"
+    },
+    {
+        "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+        "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L1D_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+        "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L2_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+        "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L3_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L1MPKI"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2MPKI"
+    },
+    {
+        "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2MPKI_All"
+    },
+    {
+        "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2HPKI_All"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L3MPKI"
+    },
+    {
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+        "BriefDescription": "Average CPU Utilization",
         "MetricGroup": "Summary",
         "MetricName": "CPU_Utilization"
     },
     {
+        "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
         "BriefDescription": "Giga Floating Point Operations Per Second",
-        "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
         "MetricGroup": "FLOPS;Summary",
         "MetricName": "GFLOPs"
     },
     {
-        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricGroup": "Power",
         "MetricName": "Turbo_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricGroup": "SMT;Summary",
         "MetricName": "SMT_2T_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
     {
-        "BriefDescription": "C3 residency percent per core",
+        "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+        "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "DRAM_BW_Use"
+    },
+    {
+        "MetricExpr": "1000000000 * ( cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x35\\,umask\\=0x3\\,filter_opc\\=0x182@ ) / ( cbox_0@event\\=0x0@ / duration_time )",
+        "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
+        "MetricGroup": "Memory_Lat",
+        "MetricName": "DRAM_Read_Latency"
+    },
+    {
+        "MetricExpr": "cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182\\,thresh\\=1@",
+        "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "DRAM_Parallel_Reads"
+    },
+    {
+        "MetricExpr": "cbox_0@event\\=0x0@",
+        "BriefDescription": "Socket actual clocks when any core is active on that socket",
+        "MetricGroup": "",
+        "MetricName": "Socket_CLKS"
+    },
+    {
         "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per core",
         "MetricName": "C3_Core_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per core",
         "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per core",
         "MetricName": "C6_Core_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per core",
         "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per core",
         "MetricName": "C7_Core_Residency"
     },
     {
-        "BriefDescription": "C2 residency percent per package",
         "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C2 residency percent per package",
         "MetricName": "C2_Pkg_Residency"
     },
     {
-        "BriefDescription": "C3 residency percent per package",
         "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per package",
         "MetricName": "C3_Pkg_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per package",
         "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per package",
         "MetricName": "C6_Pkg_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per package",
         "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per package",
         "MetricName": "C7_Pkg_Residency"
     }
 ]
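
The four TopdownL1 metrics introduced above partition issue slots: Frontend_Bound and Bad_Speculation are computed directly from their MetricExpr formulas, Retiring is the retired-slot fraction, and Backend_Bound is the remainder. A minimal sketch of the non-SMT formulas with placeholder counter values (SLOTS = 4 * cycles, per the SLOTS metric):

# Minimal sketch of the non-SMT TopdownL1 formulas added above.
# All counter readings below are placeholders.
cycles = 1_000_000_000
idq_uops_not_delivered_core = 400_000_000
uops_issued_any = 3_000_000_000
uops_retired_retire_slots = 2_800_000_000
int_misc_recovery_cycles = 50_000_000

slots = 4 * cycles                                   # "SLOTS" metric
frontend_bound = idq_uops_not_delivered_core / slots
bad_speculation = (uops_issued_any - uops_retired_retire_slots
                   + 4 * int_misc_recovery_cycles) / slots
retiring = uops_retired_retire_slots / slots
backend_bound = 1 - (frontend_bound + bad_speculation + retiring)

for name, frac in [("Frontend_Bound", frontend_bound),
                   ("Bad_Speculation", bad_speculation),
                   ("Retiring", retiring),
                   ("Backend_Bound", backend_bound)]:
    print("%-16s %5.1f%%" % (name, 100 * frac))

With these inputs the four fractions come out to 10%, 10%, 70%, and 10%; on a real run they should likewise sum to one.
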
index 141b1080429d357eea50304f5a5fcc1106340b98..75a3098d5775e89a24a480d5b2e98ebbacb1905f 100644 (file)
     },
     {
         "EventCode": "0x24",
-        "UMask": "0x41",
+        "UMask": "0xc1",
         "BriefDescription": "Demand Data Read requests that hit L2 cache",
         "Counter": "0,1,2,3",
         "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
-        "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+        "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache.",
         "SampleAfterValue": "200003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x24",
-        "UMask": "0x42",
+        "UMask": "0xc2",
         "BriefDescription": "RFO requests that hit L2 cache.",
         "Counter": "0,1,2,3",
         "EventName": "L2_RQSTS.RFO_HIT",
@@ -76,7 +76,7 @@
     },
     {
         "EventCode": "0x24",
-        "UMask": "0x44",
+        "UMask": "0xc4",
         "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
         "Counter": "0,1,2,3",
         "EventName": "L2_RQSTS.CODE_RD_HIT",
@@ -85,7 +85,7 @@
     },
     {
         "EventCode": "0x24",
-        "UMask": "0x50",
+        "UMask": "0xd0",
         "BriefDescription": "L2 prefetch requests that hit L2 cache",
         "Counter": "0,1,2,3",
         "EventName": "L2_RQSTS.L2_PF_HIT",
     {
         "EventCode": "0xD0",
         "UMask": "0x11",
-        "BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS)",
+        "BriefDescription": "Retired load uops that miss the STLB.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+        "PublicDescription": "This event counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD0",
         "UMask": "0x12",
-        "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS)",
+        "BriefDescription": "Retired store uops that miss the STLB.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+        "PublicDescription": "This event counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
         "SampleAfterValue": "100003",
         "L1_Hit_Indication": "1",
         "CounterHTOff": "0,1,2,3"
     {
         "EventCode": "0xD0",
         "UMask": "0x21",
-        "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS)",
+        "BriefDescription": "Retired load uops with locked access.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
         "Errata": "BDM35",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with locked access retired to the architected path.",
+        "PublicDescription": "This event counts load uops with locked access retired to the architected path.",
         "SampleAfterValue": "100007",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD0",
         "UMask": "0x41",
-        "BriefDescription": "Retired load uops that split across a cacheline boundary.(Precise Event - PEBS)",
+        "BriefDescription": "Retired load uops that split across a cacheline boundary.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+        "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD0",
         "UMask": "0x42",
-        "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
+        "BriefDescription": "Retired store uops that split across a cacheline boundary.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+        "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
         "SampleAfterValue": "100003",
         "L1_Hit_Indication": "1",
         "CounterHTOff": "0,1,2,3"
     {
         "EventCode": "0xD0",
         "UMask": "0x81",
-        "BriefDescription": "All retired load uops. (Precise Event - PEBS)",
+        "BriefDescription": "All retired load uops.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
+        "PublicDescription": "This event counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD0",
         "UMask": "0x82",
-        "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
+        "BriefDescription": "All retired store uops.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
+        "PublicDescription": "This event counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
         "SampleAfterValue": "2000003",
         "L1_Hit_Indication": "1",
         "CounterHTOff": "0,1,2,3"
     {
         "EventCode": "0xD1",
         "UMask": "0x1",
-        "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS)",
+        "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data source were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit  even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
+        "PublicDescription": "This event counts retired load uops which data sources were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit  even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD1",
         "UMask": "0x2",
-        "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS)",
+        "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
         "Errata": "BDM35",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
+        "PublicDescription": "This event counts retired load uops which data sources were hits in the mid-level (L2) cache.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD1",
         "UMask": "0x4",
-        "BriefDescription": "Hit in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS)",
+        "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
         "Errata": "BDM100",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
+        "PublicDescription": "This event counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
         "SampleAfterValue": "50021",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD1",
         "UMask": "0x8",
-        "BriefDescription": "Retired load uops misses in L1 cache as data sources. Uses PEBS.",
+        "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
+        "PublicDescription": "This event counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD1",
         "UMask": "0x10",
-        "BriefDescription": "Retired load uops with L2 cache misses as data sources. Uses PEBS.",
+        "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
+        "PublicDescription": "This event counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
         "SampleAfterValue": "50021",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD1",
         "UMask": "0x20",
-        "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS).",
+        "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
     {
         "EventCode": "0xD1",
         "UMask": "0x40",
-        "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS)",
+        "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit  even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
+        "PublicDescription": "This event counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit  even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD2",
         "UMask": "0x1",
-        "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS)",
+        "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
         "Errata": "BDM100",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
+        "PublicDescription": "This event counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
         "SampleAfterValue": "20011",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD2",
         "UMask": "0x2",
-        "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS)",
+        "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
         "Errata": "BDM100",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
+        "PublicDescription": "This event counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
         "SampleAfterValue": "20011",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD2",
         "UMask": "0x4",
-        "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. (Precise Event - PEBS)",
+        "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
         "Errata": "BDM100",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
+        "PublicDescription": "This event counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
         "SampleAfterValue": "20011",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD2",
         "UMask": "0x8",
-        "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required. (Precise Event - PEBS)",
+        "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
         "Errata": "BDM100",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
+        "PublicDescription": "This event counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD3",
         "UMask": "0x1",
+        "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
         "Errata": "BDE70, BDM100",
-        "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
+        "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI).",
         "SampleAfterValue": "100007",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD3",
         "UMask": "0x4",
-        "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI) (Precise Event)",
+        "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
     {
         "EventCode": "0xD3",
         "UMask": "0x10",
-        "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM (Precise Event)",
+        "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
     {
         "EventCode": "0xD3",
         "UMask": "0x20",
-        "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache (Precise Event)",
+        "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all requests that hit in the L3",
-        "MSRValue": "0x3f803c8fff",
+        "BriefDescription": "Counts all requests hit in the L3",
+        "MSRValue": "0x3F803C8FFF",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all requests hit in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
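The OFFCORE_RESPONSE entries in this block are the pair-programmed events the removed boilerplate used to describe: the event select (0xB7 or 0xBB) determines which auxiliary MSR of the MSRIndex pair (0x1A6/0x1A7) receives the MSRValue filter. Through perf_event_open() that filter travels in attr.config1 (exposed as the core PMU's offcore_rsp format attribute); a hedged sketch for the ANY_RESPONSE event above:

/* Hedged sketch: OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE.
 * Needs <linux/perf_event.h> and <string.h>, as in the earlier sketch. */
static void setup_offcore(struct perf_event_attr *attr)
{
        memset(attr, 0, sizeof(*attr));
        attr->size = sizeof(*attr);
        attr->type = PERF_TYPE_RAW;
        attr->config = (0x01 << 8) | 0xB7;  /* UMask 0x1, EventCode 0xB7 */
        attr->config1 = 0x3F803C8FFFULL;    /* MSRValue: request + response bits */
}

The remaining entries of this block differ only in the config1 value, which is why the hex MSRValue strings below are being normalized to upper case.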
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
-        "MSRValue": "0x10003c07f7",
+        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+        "MSRValue": "0x10003C07F7",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
-        "MSRValue": "0x04003c07f7",
+        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+        "MSRValue": "0x04003C07F7",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
-        "MSRValue": "0x04003c0244",
+        "BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+        "MSRValue": "0x04003C0244",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
-        "MSRValue": "0x10003c0122",
+        "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+        "MSRValue": "0x10003C0122",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
-        "MSRValue": "0x04003c0122",
+        "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+        "MSRValue": "0x04003C0122",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
-        "MSRValue": "0x10003c0091",
+        "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+        "MSRValue": "0x10003C0091",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
-        "MSRValue": "0x04003c0091",
+        "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+        "MSRValue": "0x04003C0091",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3",
-        "MSRValue": "0x3f803c0200",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
+        "MSRValue": "0x3F803C0200",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3",
-        "MSRValue": "0x3f803c0100",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
+        "MSRValue": "0x3F803C0100",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
-        "MSRValue": "0x10003c0002",
+        "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+        "MSRValue": "0x10003C0002",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3",
-        "MSRValue": "0x3f803c0002",
+        "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3",
+        "MSRValue": "0x3F803C0002",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     }
index d7b9d9c9c518850c8b3349292df28ff82efd4343..ba0e0c4e74eb21ac923a2abe8613829a7f92c520 100644 (file)
@@ -42,7 +42,7 @@
     {
         "EventCode": "0xC7",
         "UMask": "0x3",
-        "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+        "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. (RSQRT for single precision?)",
         "Counter": "0,1,2,3",
         "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
         "SampleAfterValue": "2000003",
@@ -51,7 +51,7 @@
     {
         "EventCode": "0xC7",
         "UMask": "0x4",
-        "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired.  Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+        "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired.  Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
         "Counter": "0,1,2,3",
         "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
         "SampleAfterValue": "2000003",
@@ -60,7 +60,7 @@
     {
         "EventCode": "0xC7",
         "UMask": "0x8",
-        "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired.  Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+        "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired.  Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
         "Counter": "0,1,2,3",
         "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
         "SampleAfterValue": "2000003",
@@ -69,7 +69,7 @@
     {
         "EventCode": "0xC7",
         "UMask": "0x10",
-        "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired.  Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+        "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired.  Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
         "Counter": "0,1,2,3",
         "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
         "SampleAfterValue": "2000003",
@@ -78,7 +78,7 @@
     {
         "EventCode": "0xC7",
         "UMask": "0x15",
-        "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.  ?.",
+        "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
         "Counter": "0,1,2,3",
         "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
         "SampleAfterValue": "2000006",
@@ -87,7 +87,7 @@
     {
         "EventCode": "0xc7",
         "UMask": "0x20",
-        "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired.  Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+        "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired.  Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
         "Counter": "0,1,2,3",
         "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
         "SampleAfterValue": "2000003",
@@ -96,7 +96,7 @@
     {
         "EventCode": "0xC7",
         "UMask": "0x2a",
-        "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+        "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
         "Counter": "0,1,2,3",
         "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
         "SampleAfterValue": "2000005",
     {
         "EventCode": "0xC7",
         "UMask": "0x3c",
-        "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+        "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB.  DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. (RSQRT for single-precision?)",
         "Counter": "0,1,2,3",
         "EventName": "FP_ARITH_INST_RETIRED.PACKED",
         "SampleAfterValue": "2000004",
index d79a5cfea44bc2dcdd37960f7ba289562bce9214..ecb413bb67cafbfa4fff18ddd9d9bf9d5de3eb47 100644 (file)
     {
         "EventCode": "0xc8",
         "UMask": "0x4",
-        "BriefDescription": "Number of times HLE abort was triggered (PEBS)",
+        "BriefDescription": "Number of times HLE abort was triggered",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "HLE_RETIRED.ABORTED",
-        "PublicDescription": "Number of times HLE abort was triggered (PEBS).",
+        "PublicDescription": "Number of times HLE abort was triggered.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xc9",
         "UMask": "0x4",
-        "BriefDescription": "Number of times RTM abort was triggered (PEBS)",
+        "BriefDescription": "Number of times RTM abort was triggered",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "RTM_RETIRED.ABORTED",
-        "PublicDescription": "Number of times RTM abort was triggered (PEBS).",
+        "PublicDescription": "Number of times RTM abort was triggered .",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3"
     },
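Both abort events above carry "PEBS": "1", so sampling them attributes each abort to a precise instruction inside the elided transaction. A hedged sketch for RTM_RETIRED.ABORTED, reusing the perf_event_open() pattern from the earlier PEBS example:

/* Hedged sketch: sample RTM abort locations (EventCode 0xC9, UMask 0x4).
 * Needs <linux/perf_event.h> and <string.h>, as in the earlier sketch. */
static void setup_rtm_aborts(struct perf_event_attr *attr)
{
        memset(attr, 0, sizeof(*attr));
        attr->size = sizeof(*attr);
        attr->type = PERF_TYPE_RAW;
        attr->config = (0x04 << 8) | 0xC9;  /* RTM_RETIRED.ABORTED */
        attr->sample_period = 2000003;      /* SampleAfterValue above */
        attr->sample_type = PERF_SAMPLE_IP;
        attr->precise_ip = 1;               /* PEBS: precise abort IP */
}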
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Loads with latency value being above 4",
+        "BriefDescription": "Randomly selected loads with latency value being above 4",
         "PEBS": "2",
         "MSRValue": "0x4",
         "Counter": "3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
         "MSRIndex": "0x3F6",
         "Errata": "BDM100, BDM35",
-        "PublicDescription": "This event counts loads with latency value being above four.",
+        "PublicDescription": "Counts randomly selected loads with latency value being above four.",
         "TakenAlone": "1",
         "SampleAfterValue": "100003",
         "CounterHTOff": "3"
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Loads with latency value being above 8",
+        "BriefDescription": "Randomly selected loads with latency value being above 8",
         "PEBS": "2",
         "MSRValue": "0x8",
         "Counter": "3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
         "MSRIndex": "0x3F6",
         "Errata": "BDM100, BDM35",
-        "PublicDescription": "This event counts loads with latency value being above eight.",
+        "PublicDescription": "Counts randomly selected loads with latency value being above eight.",
         "TakenAlone": "1",
         "SampleAfterValue": "50021",
         "CounterHTOff": "3"
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Loads with latency value being above 16",
+        "BriefDescription": "Randomly selected loads with latency value being above 16",
         "PEBS": "2",
         "MSRValue": "0x10",
         "Counter": "3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
         "MSRIndex": "0x3F6",
         "Errata": "BDM100, BDM35",
-        "PublicDescription": "This event counts loads with latency value being above 16.",
+        "PublicDescription": "Counts randomly selected loads with latency value being above 16.",
         "TakenAlone": "1",
         "SampleAfterValue": "20011",
         "CounterHTOff": "3"
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Loads with latency value being above 32",
+        "BriefDescription": "Randomly selected loads with latency value being above 32",
         "PEBS": "2",
         "MSRValue": "0x20",
         "Counter": "3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
         "MSRIndex": "0x3F6",
         "Errata": "BDM100, BDM35",
-        "PublicDescription": "This event counts loads with latency value being above 32.",
+        "PublicDescription": "Counts randomly selected loads with latency value being above 32.",
         "TakenAlone": "1",
         "SampleAfterValue": "100007",
         "CounterHTOff": "3"
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Loads with latency value being above 64",
+        "BriefDescription": "Randomly selected loads with latency value being above 64",
         "PEBS": "2",
         "MSRValue": "0x40",
         "Counter": "3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
         "MSRIndex": "0x3F6",
         "Errata": "BDM100, BDM35",
-        "PublicDescription": "This event counts loads with latency value being above 64.",
+        "PublicDescription": "Counts randomly selected loads with latency value being above 64.",
         "TakenAlone": "1",
         "SampleAfterValue": "2003",
         "CounterHTOff": "3"
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Loads with latency value being above 128",
+        "BriefDescription": "Randomly selected loads with latency value being above 128",
         "PEBS": "2",
         "MSRValue": "0x80",
         "Counter": "3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
         "MSRIndex": "0x3F6",
         "Errata": "BDM100, BDM35",
-        "PublicDescription": "This event counts loads with latency value being above 128.",
+        "PublicDescription": "Counts randomly selected loads with latency value being above 128.",
         "TakenAlone": "1",
         "SampleAfterValue": "1009",
         "CounterHTOff": "3"
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Loads with latency value being above 256",
+        "BriefDescription": "Randomly selected loads with latency value being above 256",
         "PEBS": "2",
         "MSRValue": "0x100",
         "Counter": "3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
         "MSRIndex": "0x3F6",
         "Errata": "BDM100, BDM35",
-        "PublicDescription": "This event counts loads with latency value being above 256.",
+        "PublicDescription": "Counts randomly selected loads with latency value being above 256.",
         "TakenAlone": "1",
         "SampleAfterValue": "503",
         "CounterHTOff": "3"
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Loads with latency value being above 512",
+        "BriefDescription": "Randomly selected loads with latency value being above 512",
         "PEBS": "2",
         "MSRValue": "0x200",
         "Counter": "3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
         "MSRIndex": "0x3F6",
         "Errata": "BDM100, BDM35",
-        "PublicDescription": "This event counts loads with latency value being above 512.",
+        "PublicDescription": "Counts randomly selected loads with latency value being above 512.",
         "TakenAlone": "1",
         "SampleAfterValue": "101",
         "CounterHTOff": "3"
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all requests that miss in the L3",
-        "MSRValue": "0x3fbfc08fff",
+        "BriefDescription": "Counts all requests miss in the L3",
+        "MSRValue": "0x3FBFC08FFF",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all requests miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache",
-        "MSRValue": "0x087fc007f7",
+        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
+        "MSRValue": "0x087FC007F7",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache",
-        "MSRValue": "0x103fc007f7",
+        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
+        "MSRValue": "0x103FC007F7",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram",
-        "MSRValue": "0x063bc007f7",
+        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
+        "MSRValue": "0x063BC007F7",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram",
-        "MSRValue": "0x06040007f7",
+        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
+        "MSRValue": "0x06040007F7",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3",
-        "MSRValue": "0x3fbfc007f7",
+        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
+        "MSRValue": "0x3FBFC007F7",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram",
+        "BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
         "MSRValue": "0x0604000244",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch code reads that miss in the L3",
-        "MSRValue": "0x3fbfc00244",
+        "BriefDescription": "Counts all demand & prefetch code reads miss in the L3",
+        "MSRValue": "0x3FBFC00244",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch code reads miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram",
+        "BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
         "MSRValue": "0x0604000122",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3",
-        "MSRValue": "0x3fbfc00122",
+        "BriefDescription": "Counts all demand & prefetch RFOs miss in the L3",
+        "MSRValue": "0x3FBFC00122",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache",
-        "MSRValue": "0x087fc00091",
+        "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
+        "MSRValue": "0x087FC00091",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache",
-        "MSRValue": "0x103fc00091",
+        "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
+        "MSRValue": "0x103FC00091",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram",
-        "MSRValue": "0x063bc00091",
+        "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
+        "MSRValue": "0x063BC00091",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram",
+        "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
         "MSRValue": "0x0604000091",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that miss in the L3",
-        "MSRValue": "0x3fbfc00091",
+        "BriefDescription": "Counts all demand & prefetch data reads miss in the L3",
+        "MSRValue": "0x3FBFC00091",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3",
-        "MSRValue": "0x3fbfc00200",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
+        "MSRValue": "0x3FBFC00200",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3",
-        "MSRValue": "0x3fbfc00100",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
+        "MSRValue": "0x3FBFC00100",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache",
-        "MSRValue": "0x103fc00002",
+        "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
+        "MSRValue": "0x103FC00002",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3",
-        "MSRValue": "0x3fbfc00002",
+        "BriefDescription": "Counts all demand data writes (RFOs) miss in the L3",
+        "MSRValue": "0x3FBFC00002",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs) miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     }
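
The PublicDescription strings above all note that offcore response events are programmed by pairing event select 0xB7/0xBB with an MSRValue written into MSR 0x1a6/0x1a7. A minimal sketch of how such an MSRValue splits into bit fields follows; the request/supplier/snoop boundaries vary by microarchitecture, so the split below is illustrative only, not a definitive layout.

    # Illustrative decode of an offcore-response MSRValue; the exact
    # field boundaries differ across microarchitectures (assumption).
    def decode_offcore(msr_value: int) -> dict:
        return {
            "request":  msr_value & 0xFFFF,          # request-type bits
            "supplier": (msr_value >> 16) & 0x7FFF,  # response-supplier bits
            "snoop":    (msr_value >> 31) & 0x7F,    # snoop-info bits
        }

    # 0x3FBFC00091 is the MSRValue of ALL_DATA_RD.LLC_MISS.ANY_RESPONSE above.
    print({k: hex(v) for k, v in decode_offcore(0x3FBFC00091).items()})

On CPUs that expose the offcore_rsp format attribute, perf requests such an event as a raw event with the MSRValue as qualifier, e.g. cpu/event=0xb7,umask=0x1,offcore_rsp=0x3fbfc00091/.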
index 0d04bf9db0008b8f46e814be69320eb9244023ea..c2f6932a581737f0edd17112f74f8fee3b70396c 100644 (file)
@@ -1,6 +1,5 @@
 [
     {
-        "EventCode": "0x00",
         "UMask": "0x1",
         "BriefDescription": "Instructions retired from execution.",
         "Counter": "Fixed counter 0",
@@ -10,7 +9,6 @@
         "CounterHTOff": "Fixed counter 0"
     },
     {
-        "EventCode": "0x00",
         "UMask": "0x2",
         "BriefDescription": "Core cycles when the thread is not in halt state",
         "Counter": "Fixed counter 1",
@@ -20,7 +18,6 @@
         "CounterHTOff": "Fixed counter 1"
     },
     {
-        "EventCode": "0x00",
         "UMask": "0x2",
         "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
         "Counter": "Fixed counter 1",
@@ -30,7 +27,6 @@
         "CounterHTOff": "Fixed counter 1"
     },
     {
-        "EventCode": "0x00",
         "UMask": "0x3",
         "BriefDescription": "Reference cycles when the core is not in halt state.",
         "Counter": "Fixed counter 2",
         "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
         "Counter": "0,1,2,3",
         "EventName": "ILD_STALL.LCP",
-        "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
+        "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
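
The hunks above drop the meaningless "EventCode": "0x00" field from the fixed-counter events; fixed counters are selected by position (IA32_FIXED_CTR0..2), not by event select, so a consumer should key on the Counter field instead. A hypothetical sketch of that check, not the kernel's actual jevents logic:

    import json

    # Hypothetical consumer logic (not jevents): identify fixed-counter
    # events by their "Counter" field now that "EventCode": "0x00" is gone.
    def is_fixed_counter(event: dict) -> bool:
        return str(event.get("Counter", "")).startswith("Fixed counter")

    sample = '[{"UMask": "0x1", "Counter": "Fixed counter 0"}, {"EventCode": "0xC2", "Counter": "0,1,2,3"}]'
    print([is_fixed_counter(e) for e in json.loads(sample)])   # [True, False]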
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xA2",
+        "EventCode": "0xa2",
         "UMask": "0x1",
         "BriefDescription": "Resource-related stall cycles",
         "Counter": "0,1,2,3",
         "EventName": "RESOURCE_STALLS.ANY",
-        "PublicDescription": "This event counts resource-related stall cycles. Reasons for stalls can be as follows:\n - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots)\n - *any* u-arch structure got empty (like INT/SIMD FreeLists)\n - FPU control word (FPCW), MXCSR\nand others. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+        "PublicDescription": "This event counts resource-related stall cycles.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xC2",
         "UMask": "0x1",
-        "BriefDescription": "Actually retired uops. (Precise Event - PEBS)",
+        "BriefDescription": "Actually retired uops.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "UOPS_RETIRED.ALL",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
+        "PublicDescription": "This event counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xC2",
         "UMask": "0x2",
-        "BriefDescription": "Retirement slots used. (Precise Event - PEBS)",
+        "BriefDescription": "Retirement slots used.",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of retirement slots used.",
+        "PublicDescription": "This event counts the number of retirement slots used.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xC4",
         "UMask": "0x1",
-        "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS)",
+        "BriefDescription": "Conditional branch instructions retired.",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "BR_INST_RETIRED.CONDITIONAL",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
+        "PublicDescription": "This event counts conditional branch instructions retired.",
         "SampleAfterValue": "400009",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xC4",
         "UMask": "0x2",
-        "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS)",
+        "BriefDescription": "Direct and indirect near call instructions retired.",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "BR_INST_RETIRED.NEAR_CALL",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
+        "PublicDescription": "This event counts both direct and indirect near call instructions retired.",
         "SampleAfterValue": "100007",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xC4",
         "UMask": "0x2",
-        "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). (Precise Event - PEBS)",
+        "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect macro near call instructions retired (captured in ring 3).",
+        "PublicDescription": "This event counts both direct and indirect macro near call instructions retired (captured in ring 3).",
         "SampleAfterValue": "100007",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xC4",
         "UMask": "0x8",
-        "BriefDescription": "Return instructions retired. (Precise Event - PEBS)",
+        "BriefDescription": "Return instructions retired.",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "BR_INST_RETIRED.NEAR_RETURN",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
+        "PublicDescription": "This event counts return instructions retired.",
         "SampleAfterValue": "100007",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xC4",
         "UMask": "0x20",
-        "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS)",
+        "BriefDescription": "Taken branch instructions retired.",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
+        "PublicDescription": "This event counts taken branch instructions retired.",
         "SampleAfterValue": "400009",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xC5",
         "UMask": "0x1",
-        "BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS)",
+        "BriefDescription": "Mispredicted conditional branch instructions retired.",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "BR_MISP_RETIRED.CONDITIONAL",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
+        "PublicDescription": "This event counts mispredicted conditional branch instructions retired.",
         "SampleAfterValue": "400009",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xC5",
         "UMask": "0x8",
-        "BriefDescription": "This event counts the number of mispredicted ret instructions retired.(Precise Event)",
+        "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "BR_MISP_RETIRED.RET",
-        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted return instructions retired.",
+        "PublicDescription": "This event counts mispredicted return instructions retired.",
         "SampleAfterValue": "100007",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xC5",
         "UMask": "0x20",
-        "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
+        "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
-        "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
+        "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
         "SampleAfterValue": "400009",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
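
The run of hunks above strips the "(Precise Event - PEBS)" suffix from the BriefDescriptions while leaving the machine-readable "PEBS": "1" flag in place, so tools should key on the flag rather than scraping description text. A minimal sketch of that selection, with made-up input:

    import json

    # Select PEBS-capable events by the "PEBS" field, not by description
    # text. Input data is made up for illustration.
    sample = '''[
        {"EventName": "BR_INST_RETIRED.CONDITIONAL", "PEBS": "1"},
        {"EventName": "RESOURCE_STALLS.ANY"}
    ]'''
    print([e["EventName"] for e in json.loads(sample) if e.get("PEBS")])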
index 71e9737f4614dba62fd60d740c42201499a1f480..1a1a3501180abe93dac637b03760c3f6efeeb03b 100644 (file)
 [
     {
-        "BriefDescription": "Instructions Per Cycle (per logical thread)",
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Frontend_Bound"
+    },
+    {
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Frontend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Bad_Speculation"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Bad_Speculation_SMT"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Backend_Bound"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Backend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Retiring"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Retiring_SMT"
+    },
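
The four TopdownL1 metrics above partition issue slots: Frontend_Bound, Bad_Speculation and Retiring are computed directly from the counters, and Backend_Bound is defined as the remainder, so the four fractions sum to one by construction. A worked sketch of the identity, with made-up counter values:

    # Made-up counter values; SLOTS = 4 issue slots per cycle * cycles.
    SLOTS = 4 * 1_000_000
    IDQ_UOPS_NOT_DELIVERED_CORE = 600_000
    UOPS_ISSUED_ANY = 3_200_000
    UOPS_RETIRED_RETIRE_SLOTS = 2_800_000
    INT_MISC_RECOVERY_CYCLES = 50_000

    frontend_bound = IDQ_UOPS_NOT_DELIVERED_CORE / SLOTS
    bad_speculation = (UOPS_ISSUED_ANY - UOPS_RETIRED_RETIRE_SLOTS
                       + 4 * INT_MISC_RECOVERY_CYCLES) / SLOTS
    retiring = UOPS_RETIRED_RETIRE_SLOTS / SLOTS
    backend_bound = 1 - (frontend_bound + bad_speculation + retiring)
    print(frontend_bound, bad_speculation, backend_bound, retiring)  # 0.15 0.15 0.0 0.7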
+    {
         "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Instructions Per Cycle (per logical thread)",
         "MetricGroup": "TopDownL1",
         "MetricName": "IPC"
     },
     {
-        "BriefDescription": "Uops Per Instruction",
         "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
-        "MetricGroup": "Pipeline",
+        "BriefDescription": "Uops Per Instruction",
+        "MetricGroup": "Pipeline;Retiring",
         "MetricName": "UPI"
     },
     {
-        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
-        "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )",
-        "MetricGroup": "Frontend",
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+        "BriefDescription": "Instruction per taken branch",
+        "MetricGroup": "Branches;PGO",
+        "MetricName": "IpTB"
+    },
+    {
+        "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+        "BriefDescription": "Branch instructions per taken branch. ",
+        "MetricGroup": "Branches;PGO",
+        "MetricName": "BpTB"
+    },
+    {
+        "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
+        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+        "MetricGroup": "PGO",
         "MetricName": "IFetch_Line_Utilization"
     },
     {
-        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
-        "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
-        "MetricGroup": "DSB; Frontend_Bandwidth",
+        "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ))",
+        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+        "MetricGroup": "DSB;Frontend_Bandwidth",
         "MetricName": "DSB_Coverage"
     },
     {
-        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricGroup": "Pipeline;Summary",
         "MetricName": "CPI"
     },
     {
-        "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
         "MetricGroup": "Summary",
         "MetricName": "CLKS"
     },
     {
-        "BriefDescription": "Total issue-pipeline slots",
-        "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+        "MetricExpr": "4 * cycles",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
         "MetricGroup": "TopDownL1",
         "MetricName": "SLOTS"
     },
     {
-        "BriefDescription": "Total number of retired Instructions",
+        "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
+        "MetricGroup": "TopDownL1_SMT",
+        "MetricName": "SLOTS_SMT"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+        "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+        "MetricGroup": "Instruction_Type;L1_Bound",
+        "MetricName": "IpL"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+        "BriefDescription": "Instructions per Store",
+        "MetricGroup": "Instruction_Type;Store_Bound",
+        "MetricName": "IpS"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Instructions per Branch",
+        "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+        "MetricName": "IpB"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+        "BriefDescription": "Instruction per (near) call",
+        "MetricGroup": "Branches",
+        "MetricName": "IpCall"
+    },
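
The Ip* metrics above are plain ratios of retired instructions to a per-operation retirement count; a lower value means that operation is more frequent. Worked example with made-up counts:

    # Made-up counts illustrating the Ip* ratios above.
    INST_RETIRED_ANY = 10_000_000
    MEM_INST_RETIRED_ALL_LOADS = 2_500_000
    BR_INST_RETIRED_ALL_BRANCHES = 1_000_000
    print("IpL =", INST_RETIRED_ANY / MEM_INST_RETIRED_ALL_LOADS)    # 4.0
    print("IpB =", INST_RETIRED_ANY / BR_INST_RETIRED_ALL_BRANCHES)  # 10.0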
+    {
         "MetricExpr": "INST_RETIRED.ANY",
+        "BriefDescription": "Total number of retired Instructions",
         "MetricGroup": "Summary",
         "MetricName": "Instructions"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / cycles",
         "BriefDescription": "Instructions Per Cycle (per physical core)",
-        "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
         "MetricGroup": "SMT",
         "MetricName": "CoreIPC"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Instructions Per Cycle (per physical core)",
+        "MetricGroup": "SMT",
+        "MetricName": "CoreIPC_SMT"
+    },
+    {
+        "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / cycles",
+        "BriefDescription": "Floating Point Operations Per Cycle",
+        "MetricGroup": "FLOPS",
+        "MetricName": "FLOPc"
+    },
+    {
+        "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Floating Point Operations Per Cycle",
+        "MetricGroup": "FLOPS_SMT",
+        "MetricName": "FLOPc_SMT"
+    },
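
FLOPc and its SMT variant weight each FP_ARITH_INST_RETIRED count by the number of elements the instruction computes: 1 for scalar, 2 for 128-bit packed double, up to 16 for 512-bit packed single. A worked sketch with made-up counts:

    # Made-up counts; weight = elements per retired FP instruction.
    scalar       = 1_000_000   # SCALAR_SINGLE + SCALAR_DOUBLE, weight 1
    pk128_double =   500_000   # weight 2
    pk256_double =   250_000   # weight 4 (shared with 128b packed single)
    pk512_double =   125_000   # weight 8 (shared with 256b packed single)
    flops = 1 * scalar + 2 * pk128_double + 4 * pk256_double + 8 * pk512_double
    cycles = 2_000_000
    print("FLOPc =", flops / cycles)   # 2.0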
+    {
+        "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
         "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
-        "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
         "MetricGroup": "Pipeline;Ports_Utilization",
         "MetricName": "ILP"
     },
     {
-        "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
-        "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL  - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)",
-        "MetricGroup": "Unknown_Branches",
-        "MetricName": "BAClear_Cost"
+        "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+        "MetricGroup": "Branch_Mispredicts",
+        "MetricName": "Branch_Misprediction_Cost"
+    },
+    {
+        "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+        "MetricGroup": "Branch_Mispredicts_SMT",
+        "MetricName": "Branch_Misprediction_Cost_SMT"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+        "MetricGroup": "Branch_Mispredicts",
+        "MetricName": "IpMispredict"
+    },
+    {
+        "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
         "BriefDescription": "Core actual clocks when any thread is active on the physical core",
-        "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
         "MetricGroup": "SMT",
         "MetricName": "CORE_CLKS"
     },
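
The long subexpression repeated throughout the _SMT metrics, ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + ONE_THREAD_ACTIVE / REF_XCLK ), is this CORE_CLKS estimate: per-thread clocks are halved (two threads share the core) and then scaled back up by the fraction of reference cycles during which only one thread was active. Made-up numbers:

    # Made-up counter values for the SMT core-clocks estimate above.
    CPU_CLK_UNHALTED_THREAD = 2_000_000
    CPU_CLK_UNHALTED_ONE_THREAD_ACTIVE = 300_000
    CPU_CLK_UNHALTED_REF_XCLK = 1_000_000
    core_clks = (CPU_CLK_UNHALTED_THREAD / 2) * (
        1 + CPU_CLK_UNHALTED_ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED_REF_XCLK)
    print(core_clks)   # 1300000.0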
     {
-        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
         "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
+        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
         "MetricGroup": "Memory_Bound;Memory_Lat",
         "MetricName": "Load_Miss_Real_Latency"
     },
     {
-        "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
-        "MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+        "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+        "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
         "MetricGroup": "Memory_Bound;Memory_BW",
         "MetricName": "MLP"
     },
     {
+        "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
         "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
-        "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )",
         "MetricGroup": "TLB",
         "MetricName": "Page_Walks_Utilization"
     },
     {
-        "BriefDescription": "Average CPU Utilization",
+        "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
+        "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+        "MetricGroup": "TLB_SMT",
+        "MetricName": "Page_Walks_Utilization_SMT"
+    },
+    {
+        "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+        "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L1D_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+        "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L2_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+        "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L3_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+        "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L3_Cache_Access_BW"
+    },
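
Each of the cache fill/access bandwidth metrics above multiplies an event that counts 64-byte lines by 64, then scales to GB/s over the wall-clock duration. Worked example with made-up counts:

    # Made-up counts for the fill-bandwidth formula above.
    L2_LINES_IN_ALL = 50_000_000        # 64-byte lines filled into L2
    duration_time = 2.0                 # seconds of wall-clock time
    l2_fill_bw = 64 * L2_LINES_IN_ALL / 1_000_000_000 / duration_time
    print(f"{l2_fill_bw:.2f} GB/s")     # 1.60 GB/s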
+    {
+        "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L1MPKI"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2MPKI"
+    },
+    {
+        "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2MPKI_All"
+    },
+    {
+        "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2HPKI_All"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L3MPKI"
+    },
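
The *MPKI metrics normalize miss counts to misses per thousand retired instructions. Worked example with made-up counts:

    # Made-up counts for the MPKI formula above.
    MEM_LOAD_RETIRED_L2_MISS = 120_000
    INST_RETIRED_ANY = 80_000_000
    print("L2MPKI =", 1000 * MEM_LOAD_RETIRED_L2_MISS / INST_RETIRED_ANY)  # 1.5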
+    {
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+        "BriefDescription": "Average CPU Utilization",
         "MetricGroup": "Summary",
         "MetricName": "CPU_Utilization"
     },
     {
+        "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
         "BriefDescription": "Giga Floating Point Operations Per Second",
-        "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
         "MetricGroup": "FLOPS;Summary",
         "MetricName": "GFLOPs"
     },
     {
-        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricGroup": "Power",
         "MetricName": "Turbo_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricGroup": "SMT;Summary",
         "MetricName": "SMT_2T_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
     {
-        "BriefDescription": "C3 residency percent per core",
+        "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+        "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "DRAM_BW_Use"
+    },
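
DRAM_BW_Use follows the same 64-bytes-per-transfer pattern, but from the uncore side: each IMC CAS command (read or write) moves one cache line. Made-up counts:

    # Made-up IMC CAS counts for the DRAM bandwidth formula above.
    cas_count_read, cas_count_write = 30_000_000, 10_000_000
    duration_time = 1.0
    dram_bw = 64 * (cas_count_read + cas_count_write) / 1_000_000_000 / duration_time
    print(f"{dram_bw:.2f} GB/s")   # 2.56 GB/s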
+    {
+        "MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x35\\\\\\,umask\\=0x21@ ) / ( cha_0@event\\=0x0@ / duration_time )",
+        "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
+        "MetricGroup": "Memory_Lat",
+        "MetricName": "DRAM_Read_Latency"
+    },
+    {
+        "MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1@",
+        "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "DRAM_Parallel_Reads"
+    },
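
The cha@event\=0x36\,umask\=0x21@ terms above reference raw CHA uncore events, with commas escaped for the metric parser, and DRAM_Read_Latency reads as a Little's-law calculation: queue occupancy over inserts, divided by the uncore clock rate. A worked sketch with made-up values; treating the 0x36/0x35 pair as occupancy/inserts is an assumption here:

    # Made-up values; assumes event 0x36 counts queue occupancy and
    # 0x35 counts queue inserts (Little's law: latency = occupancy/inserts).
    occupancy, inserts = 5_000_000, 100_000
    uncore_clocks, duration_time = 2_000_000_000, 1.0
    latency_ns = 1e9 * (occupancy / inserts) / (uncore_clocks / duration_time)
    print(f"{latency_ns:.1f} ns")   # 25.0 ns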
+    {
+        "MetricExpr": "( 1000000000 * ( imc@event\\=0xe0\\\\\\,umask\\=0x1@ / imc@event\\=0xe3@ ) / imc_0@event\\=0x0@ ) if 1 if 1 == 1 else 0 else 0",
+        "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches",
+        "MetricGroup": "Memory_Lat",
+        "MetricName": "MEM_PMM_Read_Latency"
+    },
+    {
+        "MetricExpr": "( ( 64 * imc@event\\=0xe3@ / 1000000000 ) / duration_time ) if 1 if 1 == 1 else 0 else 0",
+        "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "PMM_Read_BW"
+    },
+    {
+        "MetricExpr": "( ( 64 * imc@event\\=0xe7@ / 1000000000 ) / duration_time ) if 1 if 1 == 1 else 0 else 0",
+        "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "PMM_Write_BW"
+    },
+    {
+        "MetricExpr": "cha_0@event\\=0x0@",
+        "BriefDescription": "Socket actual clocks when any core is active on that socket",
+        "MetricGroup": "",
+        "MetricName": "Socket_CLKS"
+    },
+    {
         "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per core",
         "MetricName": "C3_Core_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per core",
         "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per core",
         "MetricName": "C6_Core_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per core",
         "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per core",
         "MetricName": "C7_Core_Residency"
     },
     {
-        "BriefDescription": "C2 residency percent per package",
         "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C2 residency percent per package",
         "MetricName": "C2_Pkg_Residency"
     },
     {
-        "BriefDescription": "C3 residency percent per package",
         "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per package",
         "MetricName": "C3_Pkg_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per package",
         "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per package",
         "MetricName": "C6_Pkg_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per package",
         "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per package",
         "MetricName": "C7_Pkg_Residency"
     }
 ]
index f8bbe087b0f8afc78825a5ace22d380aefe2c081..52a105666afcbc99a88401b1ac9a435ab1b7b292 100644 (file)
@@ -77,7 +77,8 @@
         "UMask": "0x21",
         "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Locked load uops retired (Precise event capable)"
+        "BriefDescription": "Locked load uops retired (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
@@ -88,7 +89,8 @@
         "UMask": "0x41",
         "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)"
+        "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "UMask": "0x42",
         "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)"
+        "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "UMask": "0x43",
         "EventName": "MEM_UOPS_RETIRED.SPLIT",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)"
+        "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "UMask": "0x81",
         "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Load uops retired (Precise event capable)"
+        "BriefDescription": "Load uops retired (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "UMask": "0x82",
         "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Store uops retired (Precise event capable)"
+        "BriefDescription": "Store uops retired (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "UMask": "0x83",
         "EventName": "MEM_UOPS_RETIRED.ALL",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Memory uops retired (Precise event capable)"
+        "BriefDescription": "Memory uops retired (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "UMask": "0x1",
         "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)"
+        "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "UMask": "0x2",
         "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Load uops retired that hit L2 (Precise event capable)"
+        "BriefDescription": "Load uops retired that hit L2 (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "UMask": "0x8",
         "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)"
+        "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "UMask": "0x10",
         "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Load uops retired that missed L2 (Precise event capable)"
+        "BriefDescription": "Load uops retired that missed L2 (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "UMask": "0x20",
         "EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)"
+        "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "UMask": "0x40",
         "EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Loads retired that hit WCB (Precise event capable)"
+        "BriefDescription": "Loads retired that hit WCB (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "UMask": "0x80",
         "EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Loads retired that came from DRAM (Precise event capable)"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x40000032b7 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
-        "MSRIndex": "0x1a6",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
-        "Offcore": "1"
+        "BriefDescription": "Loads retired that came from DRAM (Precise event capable)",
+        "Data_LA": "1"
     },
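
The hunks above add "Data_LA": "1" to the retired memory-uop events: their PEBS records carry a data linear address, which is what address-profiling front ends such as perf mem and perf c2c rely on. A hypothetical selection sketch with made-up input:

    import json

    # Hypothetical filter (not perf's actual implementation): keep only
    # events whose PEBS records carry a data linear address.
    sample = '''[
        {"EventName": "MEM_UOPS_RETIRED.ALL_LOADS", "PEBS": "2", "Data_LA": "1"},
        {"EventName": "ILD_STALL.LCP"}
    ]'''
    print([e["EventName"] for e in json.loads(sample) if e.get("Data_LA") == "1"])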
     {
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x36000032b7 ",
+        "MSRValue": "0x36000032b7",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.ANY",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x10000032b7 ",
+        "MSRValue": "0x10000032b7",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HITM_OTHER_CORE",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x04000032b7 ",
+        "MSRValue": "0x04000032b7",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HIT_OTHER_CORE_NO_FWD",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x02000032b7 ",
+        "MSRValue": "0x02000032b7",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x00000432b7 ",
+        "MSRValue": "0x00000432b7",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT",
         "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that hit the L2 cache.",
         "Offcore": "1"
     },
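
The MSRValue hunks above strip stray trailing spaces. Parsed as integers the values are unchanged, but as strings they break exact matching, which is why the cleanup matters. Minimal illustration:

    # Trailing space: harmless to int(), fatal to string comparison.
    a, b = "0x36000032b7 ", "0x36000032b7"
    print(a == b)                    # False: stray space breaks string matching
    print(int(a, 16) == int(b, 16))  # True: int() tolerates surrounding whitespace
    print(a.strip() == b)            # True after normalization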
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x00000132b7 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x4000000022 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
-        "MSRIndex": "0x1a6",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
-        "Offcore": "1"
-    },
     {
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x3600000022 ",
+        "MSRValue": "0x3600000022",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.ANY",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x1000000022 ",
+        "MSRValue": "0x1000000022",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HITM_OTHER_CORE",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0400000022 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0200000022 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. ",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0000040022 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that hit the L2 cache.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0000010022 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data reads (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x4000003091",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
-        "MSRIndex": "0x1a6",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data reads (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x3600003091",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.ANY",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x1000003091",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0400003091",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data reads (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0200003091",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data reads (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. ",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data reads (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0000043091",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data reads (demand & prefetch) that hit the L2 cache.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data reads (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0000013091",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data reads (demand & prefetch) that have any transaction responses from the uncore subsystem.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x4000003010 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.OUTSTANDING",
-        "MSRIndex": "0x1a6",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x3600003010 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.ANY",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x1000003010 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0400003010 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0200003010 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module. ",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0000043010 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_HIT",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that hit the L2 cache.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0000013010 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that have any transaction responses from the uncore subsystem.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts requests to the uncore subsystem that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x4000008000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
-        "MSRIndex": "0x1a6",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts requests to the uncore subsystem that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x3600008000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.ANY",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x1000008000 ",
+        "MSRValue": "0x0400000022",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0400008000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0200008000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module. ",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts requests to the uncore subsystem that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0000048000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts requests to the uncore subsystem that hit the L2 cache.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts requests to the uncore subsystem that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0000018000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts requests to the uncore subsystem that have any transaction responses from the uncore subsystem.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x4000004800 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.OUTSTANDING",
-        "MSRIndex": "0x1a6",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x3600004800 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.ANY",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x1000004800 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0400004800 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0200004800 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  that true miss for the L2 cache with a snoop miss in the other processor module. ",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0000044800 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_HIT",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  that hit the L2 cache.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0000014800 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  that have any transaction responses from the uncore subsystem.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x4000004000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.OUTSTANDING",
-        "MSRIndex": "0x1a6",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x3600004000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.ANY",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x1000004000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0400004000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0200004000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that true miss for the L2 cache with a snoop miss in the other processor module. ",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0000044000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_HIT",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that hit the L2 cache.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0000014000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that have any transaction responses from the uncore subsystem.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x4000002000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
-        "MSRIndex": "0x1a6",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x3600002000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.ANY",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x1000002000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0400002000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0200002000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. ",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0000042000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that hit the L2 cache.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0000012000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that have any transaction responses from the uncore subsystem.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data cache lines requests by software prefetch instructions that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x4000001000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.OUTSTANDING",
-        "MSRIndex": "0x1a6",
+        "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+        "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cache lines requests by software prefetch instructions that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+        "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x3600001000 ",
+        "MSRValue": "0x0200000022",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.ANY",
+        "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache.",
+        "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x1000001000 ",
+        "MSRValue": "0x0000040022",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HITM_OTHER_CORE",
+        "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+        "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that hit the L2 cache.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0400001000 ",
+        "MSRValue": "0x3600003091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+        "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.ANY",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+        "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0200001000 ",
+        "MSRValue": "0x1000003091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+        "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HITM_OTHER_CORE",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data cache lines requests by software prefetch instructions that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0000041000 ",
+        "MSRValue": "0x0400003091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_HIT",
+        "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cache lines requests by software prefetch instructions that hit the L2 cache.",
+        "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data cache lines requests by software prefetch instructions that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts data reads (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0000011000 ",
+        "MSRValue": "0x0200003091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.ANY_RESPONSE",
+        "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cache lines requests by software prefetch instructions that have any transaction responses from the uncore subsystem.",
+        "BriefDescription": "Counts data reads (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts data reads (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x4000000800 ",
+        "MSRValue": "0x0000043091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.OUTSTANDING",
-        "MSRIndex": "0x1a6",
+        "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT",
+        "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+        "BriefDescription": "Counts data reads (demand & prefetch) that hit the L2 cache.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x3600000800 ",
+        "MSRValue": "0x3600003010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.ANY",
+        "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.ANY",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache.",
+        "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x1000000800 ",
+        "MSRValue": "0x1000003010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
+        "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HITM_OTHER_CORE",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+        "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0400000800 ",
+        "MSRValue": "0x0400003010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+        "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+        "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0200000800 ",
+        "MSRValue": "0x0200003010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+        "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0000040800 ",
+        "MSRValue": "0x0000043010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_HIT",
+        "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_HIT",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that hit the L2 cache.",
+        "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that hit the L2 cache.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0000010800 ",
+        "MSRValue": "0x1000008000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE",
+        "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that have any transaction responses from the uncore subsystem.",
+        "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts bus lock and split lock requests that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x4000000400 ",
+        "MSRValue": "0x0400008000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
-        "MSRIndex": "0x1a6",
+        "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+        "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts bus lock and split lock requests that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+        "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x3600000400 ",
+        "MSRValue": "0x0200008000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.ANY",
+        "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache.",
+        "BriefDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts requests to the uncore subsystem that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x1000000400 ",
+        "MSRValue": "0x0000048000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HITM_OTHER_CORE",
+        "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+        "BriefDescription": "Counts requests to the uncore subsystem that hit the L2 cache.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts requests to the uncore subsystem that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0400000400 ",
+        "MSRValue": "0x0000018000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+        "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+        "BriefDescription": "Counts requests to the uncore subsystem that have any transaction responses from the uncore subsystem.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts bus lock and split lock requests that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0200000400 ",
+        "MSRValue": "0x3600004800",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+        "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.ANY",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts bus lock and split lock requests that true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts bus lock and split lock requests that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0000040400 ",
+        "MSRValue": "0x0000044800",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT",
+        "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_HIT",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts bus lock and split lock requests that hit the L2 cache.",
+        "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  that hit the L2 cache.",
         "Offcore": "1"
     },
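(Once a perf build carries these tables, the same counters are reachable by symbolic name; a hedged sketch, assuming the running CPU matches this event file and that perf lowercases the JSON EventName as usual:

    # Same counter as the STREAMING_STORES.L2_HIT entry just above,
    # selected by name rather than by raw encoding.
    perf stat -e offcore_response.streaming_stores.l2_hit -a -- sleep 1
)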
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts bus lock and split lock requests that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0000010400 ",
+        "MSRValue": "0x3600004000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
+        "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.ANY",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts bus lock and split lock requests that have any transaction responses from the uncore subsystem.",
+        "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts code reads in uncacheable (UC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x4000000200 ",
+        "MSRValue": "0x1000004000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.OUTSTANDING",
-        "MSRIndex": "0x1a6",
+        "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
+        "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts code reads in uncacheable (UC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+        "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x3600000200 ",
+        "MSRValue": "0x0400004000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.ANY",
+        "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache.",
+        "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x1000000200 ",
+        "MSRValue": "0x0200004000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.HITM_OTHER_CORE",
+        "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+        "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0400000200 ",
+        "MSRValue": "0x0000044000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+        "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_HIT",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+        "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that hit the L2 cache.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts code reads in uncacheable (UC) memory region that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0200000200 ",
+        "MSRValue": "0x3600002000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+        "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.ANY",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts code reads in uncacheable (UC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts code reads in uncacheable (UC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0000040200 ",
+        "MSRValue": "0x1000002000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_HIT",
+        "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts code reads in uncacheable (UC) memory region that hit the L2 cache.",
+        "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts code reads in uncacheable (UC) memory region that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0000010200 ",
+        "MSRValue": "0x0400002000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.ANY_RESPONSE",
+        "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts code reads in uncacheable (UC) memory region that have any transaction responses from the uncore subsystem.",
+        "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x4000000100 ",
+        "MSRValue": "0x0200002000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.OUTSTANDING",
-        "MSRIndex": "0x1a6",
+        "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+        "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+        "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x3600000100 ",
+        "MSRValue": "0x0000042000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.ANY",
+        "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache.",
+        "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that hit the L2 cache.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x1000000100 ",
+        "MSRValue": "0x3600001000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.HITM_OTHER_CORE",
+        "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.ANY",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+        "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0400000100 ",
+        "MSRValue": "0x1000001000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+        "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HITM_OTHER_CORE",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+        "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0200000100 ",
+        "MSRValue": "0x0400001000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+        "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HIT_OTHER_CORE_NO_FWD",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0000040100 ",
+        "MSRValue": "0x0200001000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT",
+        "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that hit the L2 cache.",
+        "BriefDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts data cache lines requests by software prefetch instructions that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0000010100 ",
+        "MSRValue": "0x0000041000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.ANY_RESPONSE",
+        "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_HIT",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that have any transaction responses from the uncore subsystem.",
+        "BriefDescription": "Counts data cache lines requests by software prefetch instructions that hit the L2 cache.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x4000000080 ",
+        "MSRValue": "0x3600000800",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.OUTSTANDING",
-        "MSRIndex": "0x1a6",
+        "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.ANY",
+        "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+        "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x3600000080 ",
+        "MSRValue": "0x1000000800",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.ANY",
+        "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache.",
+        "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x1000000080 ",
+        "MSRValue": "0x0400000800",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.HITM_OTHER_CORE",
+        "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+        "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0400000080 ",
+        "MSRValue": "0x0200000800",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+        "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+        "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0200000080 ",
+        "MSRValue": "0x0000040800",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+        "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_HIT",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that hit the L2 cache.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts bus lock and split lock requests that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0000040080 ",
+        "MSRValue": "0x0000010400",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT",
+        "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that hit the L2 cache.",
+        "BriefDescription": "Counts bus lock and split lock requests that have any transaction responses from the uncore subsystem.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0000010080 ",
+        "MSRValue": "0x3600000100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.ANY_RESPONSE",
+        "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.ANY",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that have any transaction responses from the uncore subsystem.",
+        "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+        "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x4000000020 ",
+        "MSRValue": "0x3600000080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.OUTSTANDING",
-        "MSRIndex": "0x1a6",
+        "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.ANY",
+        "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+        "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x3600000020 ",
+        "MSRValue": "0x3600000020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.ANY",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x1000000020 ",
+        "MSRValue": "0x1000000020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HITM_OTHER_CORE",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0400000020 ",
+        "MSRValue": "0x0400000020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0200000020 ",
+        "MSRValue": "0x0200000020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0000040020 ",
+        "MSRValue": "0x0000040020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT",
         "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that hit the L2 cache.",
         "Offcore": "1"
     },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0000010020 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that have any transaction responses from the uncore subsystem.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x4000000010 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.OUTSTANDING",
-        "MSRIndex": "0x1a6",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
-        "Offcore": "1"
-    },
     {
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x3600000010 ",
+        "MSRValue": "0x3600000010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.ANY",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x1000000010 ",
+        "MSRValue": "0x1000000010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HITM_OTHER_CORE",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0400000010 ",
+        "MSRValue": "0x0400000010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0200000010 ",
+        "MSRValue": "0x0200000010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0000040010 ",
+        "MSRValue": "0x0000040010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_HIT",
         "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that hit the L2 cache.",
         "Offcore": "1"
     },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0000010010 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that have any transaction responses from the uncore subsystem.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x4000000008 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.COREWB.OUTSTANDING",
-        "MSRIndex": "0x1a6",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
-        "Offcore": "1"
-    },
     {
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x3600000008 ",
+        "MSRValue": "0x3600000008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.ANY",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x1000000008 ",
+        "MSRValue": "0x1000000008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HITM_OTHER_CORE",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0400000008 ",
+        "MSRValue": "0x0400000008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HIT_OTHER_CORE_NO_FWD",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0200000008 ",
+        "MSRValue": "0x0200000008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
         "MSRIndex": "0x1a6",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0000040008 ",
+        "MSRValue": "0x0000040008",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.COREWB.L2_HIT",
         "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that hit the L2 cache.",
         "Offcore": "1"
     },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0000010008 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
-        "MSRIndex": "0x1a6",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that have any transaction responses from the uncore subsystem.",
-        "Offcore": "1"
-    },
     {
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x4000000004 ",
+        "MSRValue": "0x4000000004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x3600000004 ",
+        "MSRValue": "0x3600000004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.ANY",
         "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache.",
         "Offcore": "1"
     },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x1000000004 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
-        "Offcore": "1"
-    },
     {
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0400000004 ",
+        "MSRValue": "0x0400000004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0200000004 ",
+        "MSRValue": "0x0200000004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0000040004 ",
+        "MSRValue": "0x0000040004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT",
         "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that hit the L2 cache.",
         "Offcore": "1"
     },
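(Note that the OUTSTANDING response flavor -- MSRValue high bit 0x4000000000, e.g. the DEMAND_CODE_RD.OUTSTANDING entry above -- is listed in this file with MSRIndex 0x1a6 alone, i.e. it is only programmed on MSR_OFFCORE_RESP0, unlike the hit/miss flavors that carry both 0x1a6 and 0x1a7. A hedged raw-encoding sketch, assuming event 0xB7/umask 0x1 with offcore_rsp reaches MSR_OFFCORE_RESP0 on this core:

    # Outstanding-cycles flavor of the demand code-read offcore event;
    # 0x4000000004 = OUTSTANDING response bit + DEMAND_CODE_RD request bit.
    perf stat -e cpu/event=0xb7,umask=0x1,offcore_rsp=0x4000000004/ -a -- sleep 1
)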
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0000010004 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that have any transaction responses from the uncore subsystem.",
-        "Offcore": "1"
-    },
     {
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x4000000002 ",
+        "MSRValue": "0x4000000002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x3600000002 ",
+        "MSRValue": "0x3600000002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.ANY",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x1000000002 ",
+        "MSRValue": "0x1000000002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HITM_OTHER_CORE",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0400000002 ",
+        "MSRValue": "0x0400000002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0200000002 ",
+        "MSRValue": "0x0200000002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0000040002 ",
+        "MSRValue": "0x0000040002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT",
         "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that hit the L2 cache.",
         "Offcore": "1"
     },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0000010002 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that have any transaction responses from the uncore subsystem.",
-        "Offcore": "1"
-    },
     {
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts demand cacheable data reads of full cache lines that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x4000000001 ",
+        "MSRValue": "0x4000000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x3600000001 ",
+        "MSRValue": "0x3600000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.ANY",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x1000000001 ",
+        "MSRValue": "0x1000000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HITM_OTHER_CORE",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0400000001 ",
+        "MSRValue": "0x0400000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module.  Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0200000001 ",
+        "MSRValue": "0x0200000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts demand cacheable data reads of full cache lines that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
         "EventCode": "0xB7",
-        "MSRValue": "0x0000040001 ",
+        "MSRValue": "0x0000040001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT",
         "SampleAfterValue": "100007",
         "BriefDescription": "Counts demand cacheable data reads of full cache lines that hit the L2 cache.",
         "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts demand cacheable data reads of full cache lines that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x0000010001 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand cacheable data reads of full cache lines that have any transaction responses from the uncore subsystem.",
-        "Offcore": "1"
     }
 ]
\ No newline at end of file
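
The hunks above make two kinds of cleanups to these OFFCORE_RESPONSE entries: stray trailing spaces inside "MSRValue" strings are removed (a literal "0x0000040004 " would have to be stripped before the value can be parsed or compared reliably), and the duplicated *.ANY_RESPONSE entries are dropped. For reference, a representative entry as it stands after the cleanup, abridged to the fields that appear in the hunks above (the two long description strings are omitted here for brevity):

    {
        "CollectPEBSRecord": "1",
        "EventCode": "0xB7",
        "MSRValue": "0x0000040001",
        "Counter": "0,1,2,3",
        "UMask": "0x1",
        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT",
        "MSRIndex": "0x1a6,0x1a7",
        "SampleAfterValue": "100007",
        "Offcore": "1"
    }

All of these events share event code 0xB7 and are qualified through the MSR pair 0x1a6/0x1a7 (MSR_OFFCORE_RSP_0/1), which is why each entry carries both an MSRIndex and an MSRValue in addition to the event code.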
index 690cebd12a94b6087bb4a527649ecdea13dc4b8e..197dc76d49ddc75a9716a6ed52cab105e2bce07a 100644 (file)
         "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
         "SampleAfterValue": "200003",
         "BriefDescription": "Machine clears due to memory ordering issue"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x20000032b7 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x2000000022 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x2000003091",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache and targets non-DRAM system address.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x2000003010 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache and targets non-DRAM system address.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x2000008000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache and targets non-DRAM system address.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x2000004800 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache and targets non-DRAM system address.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x2000004000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region  that miss the L2 cache and targets non-DRAM system address.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x2000002000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache and targets non-DRAM system address.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x2000001000 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache and targets non-DRAM system address.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x2000000800 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache and targets non-DRAM system address.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x2000000400 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache and targets non-DRAM system address.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x2000000200 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache and targets non-DRAM system address.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x2000000100 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache and targets non-DRAM system address.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x2000000080 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache and targets non-DRAM system address.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x2000000020 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache and targets non-DRAM system address.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x2000000010 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache and targets non-DRAM system address.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x2000000008 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.NON_DRAM",
-        "MSRIndex": "0x1a6",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache and targets non-DRAM system address.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x2000000004 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache and targets non-DRAM system address.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x2000000002 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache and targets non-DRAM system address.",
-        "Offcore": "1"
-    },
-    {
-        "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
-        "EventCode": "0xB7",
-        "MSRValue": "0x2000000001 ",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.NON_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache and targets non-DRAM system address.",
-        "Offcore": "1"
     }
 ]
\ No newline at end of file
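
Every entry deleted above is a *.L2_MISS.NON_DRAM variant of the same event-code-0xB7 offcore-response event. Judging from the values, the MSRValue composes a response selector in the high bits (0x2000000000 for the non-DRAM target in each entry here) with a request-type mask in the low bits (0x01 demand data read, 0x02 demand RFO, 0x04 demand code read, 0x08 core writeback, and so on up through 0x8000 for any request). A minimal sketch of one such entry, reconstructed from the deleted lines with the description strings elided:

    {
        "EventCode": "0xB7",
        "MSRValue": "0x2000000002",
        "Counter": "0,1,2,3",
        "UMask": "0x1",
        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.NON_DRAM",
        "MSRIndex": "0x1a6,0x1a7",
        "SampleAfterValue": "100007",
        "Offcore": "1"
    }

Here 0x2000000002 = 0x2000000000 (non-DRAM response) | 0x2 (demand RFO request), matching the naming convention of the surviving entries in the previous file.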
index 254788af8ab6771f845752e6c52744b8b77a90ef..6342368accf8a45c1869bae95ac1259db30a104e 100644 (file)
@@ -1,7 +1,6 @@
 [
     {
         "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers.  This event uses fixed counter 0.  You cannot collect a PEBs record for this event.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 0",
         "UMask": "0x1",
         "EventName": "INST_RETIRED.ANY",
@@ -10,7 +9,6 @@
     },
     {
         "PublicDescription": "Counts the number of core cycles while the core is not in a halt state.  The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time.  This event uses fixed counter 1.  You cannot collect a PEBs record for this event.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 1",
         "UMask": "0x2",
         "EventName": "CPU_CLK_UNHALTED.CORE",
@@ -19,7 +17,6 @@
     },
     {
         "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction.  In mobile systems the core frequency may change from time.  This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time.  This event uses fixed counter 2.  You cannot collect a PEBs record for this event.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 2",
         "UMask": "0x3",
         "EventName": "CPU_CLK_UNHALTED.REF_TSC",
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification.  Self-modifying code (SMC) causes a severe penalty in all Intel architecture processors.",
+        "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification.  Self-modifying code (SMC) causes a severe penalty in all Intel\u00ae architecture processors.",
         "EventCode": "0xC3",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
index 9805198d3f5f1fd157fc9b860cde38217d94f129..343d66bbd777003ee602bf7933319085c321c38f 100644 (file)
@@ -48,7 +48,8 @@
         "UMask": "0x11",
         "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)"
+        "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
@@ -59,7 +60,8 @@
         "UMask": "0x12",
         "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)"
+        "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
@@ -70,6 +72,7 @@
         "UMask": "0x13",
         "EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)"
+        "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)",
+        "Data_LA": "1"
     }
 ]
\ No newline at end of file
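
Each precise-capable MEM_UOPS_RETIRED event above gains "Data_LA": "1". In the perf pmu-events JSON this flag appears to mark events whose PEBS records carry the data linear address, so tooling such as perf mem can attribute a sample to the memory operand rather than only to the instruction. The resulting entry, abridged to the fields visible in the hunk (the EventCode line sits in the elided context):

    {
        "UMask": "0x11",
        "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
        "SampleAfterValue": "200003",
        "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)",
        "Data_LA": "1"
    }

The same tag is applied uniformly to the loads, stores, and combined DTLB-miss variants, and to the split/lock/hit-level events in the neighboring file.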
index b4791b443a6678e2da59ad890023d1db40f143d6..5a6ac8285ad4bfe385a932bfd36ef763c4d93e79 100644 (file)
@@ -92,7 +92,8 @@
         "PEBScounters": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Locked load uops retired (Precise event capable)"
+        "BriefDescription": "Locked load uops retired (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "PEBScounters": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)"
+        "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "PEBScounters": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)"
+        "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "PEBScounters": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.SPLIT",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)"
+        "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "PEBScounters": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Load uops retired (Precise event capable)"
+        "BriefDescription": "Load uops retired (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "PEBScounters": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Store uops retired (Precise event capable)"
+        "BriefDescription": "Store uops retired (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "PEBScounters": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.ALL",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Memory uops retired (Precise event capable)"
+        "BriefDescription": "Memory uops retired (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "PEBScounters": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)"
+        "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "PEBScounters": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Load uops retired that hit L2 (Precise event capable)"
+        "BriefDescription": "Load uops retired that hit L2 (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "PEBScounters": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)"
+        "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "PEBScounters": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Load uops retired that missed L2 (Precise event capable)"
+        "BriefDescription": "Load uops retired that missed L2 (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "PEBScounters": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)"
+        "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "PEBScounters": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Loads retired that hit WCB (Precise event capable)"
+        "BriefDescription": "Loads retired that hit WCB (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "PEBScounters": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Loads retired that came from DRAM (Precise event capable)"
+        "BriefDescription": "Loads retired that came from DRAM (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "CollectPEBSRecord": "1",
         "PDIR_COUNTER": "na",
         "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "PDIR_COUNTER": "na",
         "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "PDIR_COUNTER": "na",
         "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "PDIR_COUNTER": "na",
         "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "PDIR_COUNTER": "na",
         "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "PDIR_COUNTER": "na",
         "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "PDIR_COUNTER": "na",
         "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "PDIR_COUNTER": "na",
         "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "PDIR_COUNTER": "na",
         "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "PDIR_COUNTER": "na",
         "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "PDIR_COUNTER": "na",
         "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region  true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "PDIR_COUNTER": "na",
         "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "PDIR_COUNTER": "na",
         "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "PDIR_COUNTER": "na",
         "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "PDIR_COUNTER": "na",
         "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
         "PDIR_COUNTER": "na",
         "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ",
+        "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
         "Offcore": "1"
     },
     {
index ccf1aed69197bd77459db10b8f7ce7aebc182f90..e3fa1a0ba71b63560867a99b014cc6f8c803513a 100644 (file)
@@ -3,7 +3,6 @@
         "PEBS": "2",
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers.  This event uses fixed counter 0.  You cannot collect a PEBs record for this event.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 0",
         "UMask": "0x1",
         "PEBScounters": "32",
@@ -15,7 +14,6 @@
     {
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts the number of core cycles while the core is not in a halt state.  The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time.  This event uses fixed counter 1.  You cannot collect a PEBs record for this event.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 1",
         "UMask": "0x2",
         "PEBScounters": "33",
@@ -27,7 +25,6 @@
     {
         "CollectPEBSRecord": "1",
         "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction.  In mobile systems the core frequency may change from time.  This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time.  This event uses fixed counter 2.  You cannot collect a PEBs record for this event.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 2",
         "UMask": "0x3",
         "PEBScounters": "34",
     },
     {
         "CollectPEBSRecord": "1",
-        "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification.  Self-modifying code (SMC) causes a severe penalty in all Intel architecture processors.",
+        "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification.  Self-modifying code (SMC) causes a severe penalty in all Intel\u00ae architecture processors.",
         "EventCode": "0xC3",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
index 0b53a3b0dfb87074aeab59bfd0a538e65dbf06da..0d32fd26ded14e6cd700e38a73089c9b39273d90 100644 (file)
         "PEBScounters": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)"
+        "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "PEBScounters": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)"
+        "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)",
+        "Data_LA": "1"
     },
     {
         "PEBS": "2",
         "PEBScounters": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)"
+        "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)",
+        "Data_LA": "1"
     }
 ]
\ No newline at end of file
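
The companion hunks above also drop the placeholder "EventCode": "0x00" from the three fixed-counter events. Fixed counters are not programmed through the per-counter event-select MSRs, so a zero event code is at best meaningless and at worst misleading to tooling; after the change each entry is identified by its Counter field alone. An abridged sketch of the first such entry as it now stands (EventName taken from the matching hunk earlier in this series; the descriptions are omitted):

    {
        "PEBS": "2",
        "CollectPEBSRecord": "1",
        "Counter": "Fixed counter 0",
        "UMask": "0x1",
        "PEBScounters": "32",
        "EventName": "INST_RETIRED.ANY"
    }

The two CPU_CLK_UNHALTED fixed-counter entries receive the identical treatment, differing only in their Counter, UMask, and PEBScounters values.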
index da4d6ddd4f924d5e8527c45668151ce17cfcb346..7fb0ad8d8ca1da023d55d201c731da3d2d5ab7c4 100644 (file)
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "Demand data read requests that hit L2 cache.",
+        "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
         "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x41",
+        "UMask": "0xc1",
         "Errata": "HSD78",
         "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
         "SampleAfterValue": "200003",
@@ -77,7 +77,7 @@
         "PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
         "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x42",
+        "UMask": "0xc2",
         "EventName": "L2_RQSTS.RFO_HIT",
         "SampleAfterValue": "200003",
         "BriefDescription": "RFO requests that hit L2 cache",
@@ -87,7 +87,7 @@
         "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
         "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x44",
+        "UMask": "0xc4",
         "EventName": "L2_RQSTS.CODE_RD_HIT",
         "SampleAfterValue": "200003",
         "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
@@ -97,7 +97,7 @@
         "PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
         "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x50",
+        "UMask": "0xd0",
         "EventName": "L2_RQSTS.L2_PF_HIT",
         "SampleAfterValue": "200003",
         "BriefDescription": "L2 prefetch requests that hit L2 cache",
         "Errata": "HSD29, HSD25, HSM26, HSM30",
         "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
         "SampleAfterValue": "20011",
-        "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. ",
+        "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
         "CounterHTOff": "0,1,2,3",
         "Data_LA": "1"
     },
         "Errata": "HSD29, HSD25, HSM26, HSM30",
         "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
         "SampleAfterValue": "20011",
-        "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. ",
+        "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
         "CounterHTOff": "0,1,2,3",
         "Data_LA": "1"
     },
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "",
         "EventCode": "0xf4",
         "Counter": "0,1,2,3",
         "UMask": "0x10",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all requests hit in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c8fff",
+        "MSRValue": "0x3F803C8FFF",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.L3_HIT.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all requests that hit in the L3",
+        "BriefDescription": "Counts all requests hit in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c07f7",
+        "MSRValue": "0x10003C07F7",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+        "BriefDescription": "hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c07f7",
+        "MSRValue": "0x04003C07F7",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+        "BriefDescription": "hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0244",
+        "MSRValue": "0x04003C0244",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+        "BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c0122",
+        "MSRValue": "0x10003C0122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+        "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0122",
+        "MSRValue": "0x04003C0122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+        "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c0091",
+        "MSRValue": "0x10003C0091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+        "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0091",
+        "MSRValue": "0x04003C0091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+        "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0200",
+        "MSRValue": "0x3F803C0200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs  that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0100",
+        "MSRValue": "0x3F803C0100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs  that hit in the L3",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0080",
+        "MSRValue": "0x3F803C0080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0040",
+        "MSRValue": "0x3F803C0040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0020",
+        "MSRValue": "0x3F803C0020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3f803c0010",
+        "MSRValue": "0x3F803C0010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c0004",
+        "MSRValue": "0x10003C0004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+        "BriefDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0004",
+        "MSRValue": "0x04003C0004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+        "BriefDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c0002",
+        "MSRValue": "0x10003C0002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+        "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0002",
+        "MSRValue": "0x04003C0002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+        "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10003c0001",
+        "MSRValue": "0x10003C0001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+        "BriefDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04003c0001",
+        "MSRValue": "0x04003C0001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+        "BriefDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     }
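How an MSRValue decomposes is easiest to see on the entries above. The field positions below follow my reading of the SDM's offcore-response layout for this generation; they are an assumption for illustration, not something this patch states:

    /* Request-type bits (low 16 bits) -- assumed positions: */
    #define REQ_DMND_RFO     (1ULL << 1)
    #define REQ_PF_L2_RFO    (1ULL << 5)
    #define REQ_PF_L3_RFO    (1ULL << 8)    /* the three together = 0x122, i.e. "ALL_RFO" */
    /* Supplier bits: L3 hit with the line in M/E/S/F state: */
    #define SUP_L3_HIT_MESF  (0xFULL << 18) /* 0x3C0000 */
    /* Snoop-response bits: */
    #define SNP_HITM         (1ULL << 36)   /* sibling core held the line in M and forwarded it */

    /* REQ_DMND_RFO | REQ_PF_L2_RFO | REQ_PF_L3_RFO | SUP_L3_HIT_MESF | SNP_HITM
     * == 0x10003C0122, the ALL_RFO.L3_HIT.HITM_OTHER_CORE mask above; swapping
     * SNP_HITM for the hit-no-forward bit (1ULL << 34) yields 0x04003C0122. */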
index f9843e5a9b429e612c49b21921122a7c9940fb3f..f5a3beaa19fc8d743e7772d4b3b9a2aa1394fea0 100644 (file)
@@ -1,22 +1,26 @@
 [
     {
+        "PEBS": "1",
+        "PublicDescription": "",
         "EventCode": "0xC1",
         "Counter": "0,1,2,3",
         "UMask": "0x8",
         "Errata": "HSD56, HSM57",
         "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+        "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
+        "PEBS": "1",
+        "PublicDescription": "",
         "EventCode": "0xC1",
         "Counter": "0,1,2,3",
         "UMask": "0x10",
         "Errata": "HSD56, HSM57",
         "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+        "BriefDescription": "Number of transitions from legacy SSE to AVX-256 when penalty applicable",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "Number of X87 FP assists due to output values.",
+        "PEBS": "1",
+        "PublicDescription": "",
         "EventCode": "0xCA",
         "Counter": "0,1,2,3",
         "UMask": "0x2",
         "EventName": "FP_ASSIST.X87_OUTPUT",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Number of X87 assists due to output value.",
+        "BriefDescription": "output - Numeric Overflow, Numeric Underflow, Inexact Result",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "Number of X87 FP assists due to input values.",
+        "PEBS": "1",
+        "PublicDescription": "",
         "EventCode": "0xCA",
         "Counter": "0,1,2,3",
         "UMask": "0x4",
         "EventName": "FP_ASSIST.X87_INPUT",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Number of X87 assists due to input value.",
+        "BriefDescription": "input - Invalid Operation, Denormal Operand, SNaN Operand",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "Number of SIMD FP assists due to output values.",
+        "PEBS": "1",
+        "PublicDescription": "",
         "EventCode": "0xCA",
         "Counter": "0,1,2,3",
         "UMask": "0x8",
         "EventName": "FP_ASSIST.SIMD_OUTPUT",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Number of SIMD FP assists due to Output values",
+        "BriefDescription": "SSE* FP micro-code assist when output value is invalid.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "Number of SIMD FP assists due to input values.",
+        "PEBS": "1",
+        "PublicDescription": "",
         "EventCode": "0xCA",
         "Counter": "0,1,2,3",
         "UMask": "0x10",
         "EventName": "FP_ASSIST.SIMD_INPUT",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Number of SIMD FP assists due to input values",
+        "BriefDescription": "Any input SSE* FP Assist",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "Cycles with any input/output SSE* or FP assists.",
+        "PEBS": "1",
+        "PublicDescription": "",
         "EventCode": "0xCA",
         "Counter": "0,1,2,3",
         "UMask": "0x1e",
         "EventName": "FP_ASSIST.ANY",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Cycles with any input/output SSE or FP assist",
+        "BriefDescription": "Counts any FP_ASSIST umask was incrementing",
         "CounterMask": "1",
         "CounterHTOff": "0,1,2,3"
     }
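The "PEBS": "1" flags added throughout this hunk mark events that support precise sampling. In perf_event_attr terms that is the precise_ip field (the :p/:pp event modifiers); here is a sketch of just the attribute setup, reusing the open/ioctl scaffold from the offcore example earlier — the choice of OTHER_ASSISTS.AVX_TO_SSE and of skid level 2 is mine:

    struct perf_event_attr attr = {
            .type          = PERF_TYPE_RAW,
            .size          = sizeof(attr),
            .config        = 0x08C1,   /* umask 0x8, event 0xC1: OTHER_ASSISTS.AVX_TO_SSE */
            .sample_period = 100003,   /* the SampleAfterValue above */
            .precise_ip    = 2,        /* request PEBS; roughly the ":pp" modifier */
            .disabled      = 1,
    };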
index 5ab5c78fe5805ec8a0925abe925af7ba73aa5c50..21b27488b6214bca09b3a016ce25e18e228dda73 100644 (file)
 [
     {
-        "BriefDescription": "Instructions Per Cycle (per logical thread)",
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Frontend_Bound"
+    },
+    {
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Frontend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Bad_Speculation"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Bad_Speculation_SMT"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Backend_Bound"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Backend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Retiring"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Retiring_SMT"
+    },
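The eight entries above are the new Level-1 Top-Down metrics (plus their _SMT variants). Ignoring the SMT clock correction for a moment, the four fractions reduce to the following, and by construction they sum to one — this is just a restatement of the MetricExpr strings, nothing beyond them:

\begin{align*}
\mathrm{SLOTS} &= 4 \cdot \mathrm{CPU\_CLK\_UNHALTED.THREAD} \\
\mathrm{Frontend\_Bound} &= \mathrm{IDQ\_UOPS\_NOT\_DELIVERED.CORE} / \mathrm{SLOTS} \\
\mathrm{Bad\_Speculation} &= (\mathrm{UOPS\_ISSUED.ANY} - \mathrm{UOPS\_RETIRED.RETIRE\_SLOTS} + 4 \cdot \mathrm{INT\_MISC.RECOVERY\_CYCLES}) / \mathrm{SLOTS} \\
\mathrm{Retiring} &= \mathrm{UOPS\_RETIRED.RETIRE\_SLOTS} / \mathrm{SLOTS} \\
\mathrm{Backend\_Bound} &= 1 - (\mathrm{Frontend\_Bound} + \mathrm{Bad\_Speculation} + \mathrm{Retiring})
\end{align*}

The _SMT forms substitute the estimated core clocks (see the note after SLOTS_SMT below) for CPU_CLK_UNHALTED.THREAD.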
+    {
         "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Instructions Per Cycle (per logical thread)",
         "MetricGroup": "TopDownL1",
         "MetricName": "IPC"
     },
     {
-        "BriefDescription": "Uops Per Instruction",
         "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
-        "MetricGroup": "Pipeline",
+        "BriefDescription": "Uops Per Instruction",
+        "MetricGroup": "Pipeline;Retiring",
         "MetricName": "UPI"
     },
     {
-        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+        "BriefDescription": "Instruction per taken branch",
+        "MetricGroup": "Branches;PGO",
+        "MetricName": "IpTB"
+    },
+    {
+        "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+        "BriefDescription": "Branch instructions per taken branch. ",
+        "MetricGroup": "Branches;PGO",
+        "MetricName": "BpTB"
+    },
+    {
         "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
-        "MetricGroup": "Frontend",
+        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+        "MetricGroup": "PGO",
         "MetricName": "IFetch_Line_Utilization"
     },
     {
-        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
-        "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
-        "MetricGroup": "DSB; Frontend_Bandwidth",
+        "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+        "MetricGroup": "DSB;Frontend_Bandwidth",
         "MetricName": "DSB_Coverage"
     },
     {
-        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricGroup": "Pipeline;Summary",
         "MetricName": "CPI"
     },
     {
-        "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
         "MetricGroup": "Summary",
         "MetricName": "CLKS"
     },
     {
-        "BriefDescription": "Total issue-pipeline slots",
-        "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+        "MetricExpr": "4 * cycles",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
         "MetricGroup": "TopDownL1",
         "MetricName": "SLOTS"
     },
     {
-        "BriefDescription": "Total number of retired Instructions",
+        "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
+        "MetricGroup": "TopDownL1_SMT",
+        "MetricName": "SLOTS_SMT"
+    },
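Every *_SMT expression in this file replaces plain cycles with an estimated per-core clock built from per-thread events. As I read the repeated subexpression, the estimate is:

\[
\mathrm{CORE\_CLKS} \approx \frac{\mathrm{CPU\_CLK\_UNHALTED.THREAD}}{2}\left(1 + \frac{\mathrm{CPU\_CLK\_UNHALTED.ONE\_THREAD\_ACTIVE}}{\mathrm{CPU\_CLK\_UNHALTED.REF\_XCLK}}\right)
\]

When the sibling thread is always busy, ONE_THREAD_ACTIVE is zero and the estimate halves the thread clocks (both threads observe every core cycle, so halving de-duplicates them); when this thread runs alone, the ratio approaches one and the estimate returns the full thread clocks.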
+    {
+        "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+        "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+        "MetricGroup": "Instruction_Type;L1_Bound",
+        "MetricName": "IpL"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+        "BriefDescription": "Instructions per Store",
+        "MetricGroup": "Instruction_Type;Store_Bound",
+        "MetricName": "IpS"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Instructions per Branch",
+        "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+        "MetricName": "IpB"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+        "BriefDescription": "Instruction per (near) call",
+        "MetricGroup": "Branches",
+        "MetricName": "IpCall"
+    },
+    {
         "MetricExpr": "INST_RETIRED.ANY",
+        "BriefDescription": "Total number of retired Instructions",
         "MetricGroup": "Summary",
         "MetricName": "Instructions"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / cycles",
         "BriefDescription": "Instructions Per Cycle (per physical core)",
-        "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
         "MetricGroup": "SMT",
         "MetricName": "CoreIPC"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Instructions Per Cycle (per physical core)",
+        "MetricGroup": "SMT",
+        "MetricName": "CoreIPC_SMT"
+    },
+    {
+        "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
         "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
-        "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
         "MetricGroup": "Pipeline;Ports_Utilization",
         "MetricName": "ILP"
     },
     {
-        "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
-        "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL  - (( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION )) ) / RS_EVENTS.EMPTY_END)",
-        "MetricGroup": "Unknown_Branches",
-        "MetricName": "BAClear_Cost"
+        "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+        "MetricGroup": "Branch_Mispredicts",
+        "MetricName": "IpMispredict"
     },
     {
+        "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
         "BriefDescription": "Core actual clocks when any thread is active on the physical core",
-        "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
         "MetricGroup": "SMT",
         "MetricName": "CORE_CLKS"
     },
     {
-        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
         "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
         "MetricGroup": "Memory_Bound;Memory_Lat",
         "MetricName": "Load_Miss_Real_Latency"
     },
     {
-        "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
-        "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+        "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+        "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
         "MetricGroup": "Memory_Bound;Memory_BW",
         "MetricName": "MLP"
     },
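The reworked MLP expression is a per-thread mean queue depth:

\[
\mathrm{MLP} = \frac{\mathrm{L1D\_PEND\_MISS.PENDING}}{\mathrm{L1D\_PEND\_MISS.PENDING\_CYCLES}}
\]

PENDING accumulates the number of in-flight L1 demand misses each cycle, and PENDING_CYCLES counts the cycles where that number is nonzero, so the ratio is the average number of outstanding misses while at least one is outstanding.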
     {
+        "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
         "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
-        "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
         "MetricGroup": "TLB",
         "MetricName": "Page_Walks_Utilization"
     },
     {
-        "BriefDescription": "Average CPU Utilization",
+        "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+        "MetricGroup": "TLB_SMT",
+        "MetricName": "Page_Walks_Utilization_SMT"
+    },
+    {
+        "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+        "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L1D_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+        "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L2_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+        "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L3_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L1MPKI"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2MPKI"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2MPKI_All"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2HPKI_All"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L3MPKI"
+    },
+    {
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+        "BriefDescription": "Average CPU Utilization",
         "MetricGroup": "Summary",
         "MetricName": "CPU_Utilization"
     },
     {
-        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricGroup": "Power",
         "MetricName": "Turbo_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricGroup": "SMT;Summary",
         "MetricName": "SMT_2T_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
     {
-        "BriefDescription": "C3 residency percent per core",
+        "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
+        "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "DRAM_BW_Use"
+    },
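DRAM_BW_Use converts two uncore arbiter request counts (events 0x81 and 0x84, umask 0x1 in the expression — reads and writes, per the description) into bandwidth by assuming one 64-byte line transfer per request:

\[
\mathrm{DRAM\_BW\_Use} = \frac{64 \cdot (\mathrm{reads} + \mathrm{writes})}{10^{9} \cdot \mathrm{duration\_time}} \ \mathrm{GB/s}
\]

As a units check (illustrative numbers, not from the patch): 10^9 total requests over a 2 s run give 64 * 10^9 / 10^6 / 2 / 1000 = 32 GB/s.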
+    {
         "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per core",
         "MetricName": "C3_Core_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per core",
         "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per core",
         "MetricName": "C6_Core_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per core",
         "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per core",
         "MetricName": "C7_Core_Residency"
     },
     {
-        "BriefDescription": "C2 residency percent per package",
         "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C2 residency percent per package",
         "MetricName": "C2_Pkg_Residency"
     },
     {
-        "BriefDescription": "C3 residency percent per package",
         "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per package",
         "MetricName": "C3_Pkg_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per package",
         "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per package",
         "MetricName": "C6_Pkg_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per package",
         "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per package",
         "MetricName": "C7_Pkg_Residency"
     }
 ]
index e5f9fa6655b30c9bdff4c256c87206c9d60c160b..ef13ed88e2eae681a0e84676c7979fd46db9b04c 100644 (file)
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Loads with latency value being above 4.",
+        "BriefDescription": "Randomly selected loads with latency value being above 4.",
         "TakenAlone": "1",
         "CounterHTOff": "3"
     },
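The LOAD_LATENCY_GT_* events in this file are PEBS latency-threshold events: MSR 0x3F6 (the MSRIndex above) holds the cycle threshold, and the hardware captures only a random subset of qualifying loads — hence the "Randomly selected" wording this patch adds. perf exposes the threshold as ldlat in config1 and requires precise mode; here is a sketch of the attribute setup, where the 0xCD/0x1 encoding for MEM_TRANS_RETIRED.LOAD_LATENCY is my assumption from the standard encoding (it is not shown in this hunk):

    struct perf_event_attr attr = {
            .type          = PERF_TYPE_RAW,
            .size          = sizeof(attr),
            .config        = 0x01CD,   /* umask 0x1, event 0xCD: MEM_TRANS_RETIRED.LOAD_LATENCY */
            .config1       = 4,        /* ldlat: threshold loaded into MSR 0x3F6 */
            .sample_period = 100003,   /* SampleAfterValue above */
            .sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR | PERF_SAMPLE_WEIGHT,
            .precise_ip    = 2,        /* latency-threshold events are PEBS-only */
            .disabled      = 1,
    };

On the command line the equivalent is perf record -e cpu/event=0xcd,umask=0x1,ldlat=4/pp, which is also what perf mem record drives underneath.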
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "50021",
-        "BriefDescription": "Loads with latency value being above 8.",
+        "BriefDescription": "Randomly selected loads with latency value being above 8.",
         "TakenAlone": "1",
         "CounterHTOff": "3"
     },
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "20011",
-        "BriefDescription": "Loads with latency value being above 16.",
+        "BriefDescription": "Randomly selected loads with latency value being above 16.",
         "TakenAlone": "1",
         "CounterHTOff": "3"
     },
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Loads with latency value being above 32.",
+        "BriefDescription": "Randomly selected loads with latency value being above 32.",
         "TakenAlone": "1",
         "CounterHTOff": "3"
     },
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "2003",
-        "BriefDescription": "Loads with latency value being above 64.",
+        "BriefDescription": "Randomly selected loads with latency value being above 64.",
         "TakenAlone": "1",
         "CounterHTOff": "3"
     },
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "1009",
-        "BriefDescription": "Loads with latency value being above 128.",
+        "BriefDescription": "Randomly selected loads with latency value being above 128.",
         "TakenAlone": "1",
         "CounterHTOff": "3"
     },
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "503",
-        "BriefDescription": "Loads with latency value being above 256.",
+        "BriefDescription": "Randomly selected loads with latency value being above 256.",
         "TakenAlone": "1",
         "CounterHTOff": "3"
     },
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "101",
-        "BriefDescription": "Loads with latency value being above 512.",
+        "BriefDescription": "Randomly selected loads with latency value being above 512.",
         "TakenAlone": "1",
         "CounterHTOff": "3"
     },
     {
-        "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all requests miss in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3fffc08fff",
+        "MSRValue": "0x3FFFC08FFF",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.L3_MISS.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all requests that miss in the L3",
+        "BriefDescription": "Counts all requests miss in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "miss the L3 and the data is returned from local dram",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x01004007f7",
+        "MSRValue": "0x01004007F7",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.LOCAL_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram",
+        "BriefDescription": "miss the L3 and the data is returned from local dram",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "miss in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3fffc007f7",
+        "MSRValue": "0x3FFFC007F7",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3",
+        "BriefDescription": "miss in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
         "EventCode": "0xB7, 0xBB",
         "MSRValue": "0x0100400244",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_MISS.LOCAL_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram",
+        "BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch code reads miss in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3fffc00244",
+        "MSRValue": "0x3FFFC00244",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_MISS.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch code reads that miss in the L3",
+        "BriefDescription": "Counts all demand & prefetch code reads miss in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
         "EventCode": "0xB7, 0xBB",
         "MSRValue": "0x0100400122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.LOCAL_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram",
+        "BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs miss in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3fffc00122",
+        "MSRValue": "0x3FFFC00122",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3",
+        "BriefDescription": "Counts all demand & prefetch RFOs miss in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
         "EventCode": "0xB7, 0xBB",
         "MSRValue": "0x0100400091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.LOCAL_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram",
+        "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads miss in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3fffc00091",
+        "MSRValue": "0x3FFFC00091",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch data reads that miss in the L3",
+        "BriefDescription": "Counts all demand & prefetch data reads miss in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3fffc00200",
+        "MSRValue": "0x3FFFC00200",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs  that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3fffc00100",
+        "MSRValue": "0x3FFFC00100",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs  that miss in the L3",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3fffc00080",
+        "MSRValue": "0x3FFFC00080",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3fffc00040",
+        "MSRValue": "0x3FFFC00040",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3fffc00020",
+        "MSRValue": "0x3FFFC00020",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3fffc00010",
+        "MSRValue": "0x3FFFC00010",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
         "EventCode": "0xB7, 0xBB",
         "MSRValue": "0x0100400004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.LOCAL_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram",
+        "BriefDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads miss in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3fffc00004",
+        "MSRValue": "0x3FFFC00004",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand code reads that miss in the L3",
+        "BriefDescription": "Counts all demand code reads miss in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
         "EventCode": "0xB7, 0xBB",
         "MSRValue": "0x0100400002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.LOCAL_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram",
+        "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs) miss in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3fffc00002",
+        "MSRValue": "0x3FFFC00002",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3",
+        "BriefDescription": "Counts all demand data writes (RFOs) miss in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
         "EventCode": "0xB7, 0xBB",
         "MSRValue": "0x0100400001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.LOCAL_DRAM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram",
+        "BriefDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts demand data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads miss in the L3",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3fffc00001",
+        "MSRValue": "0x3FFFC00001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts demand data reads that miss in the L3",
+        "BriefDescription": "Counts demand data reads miss in the L3",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     }
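
The MSRValue hex strings normalized above are structured, not opaque: the low 16 bits select request types and the upper bits select supplier and snoop responses. A sketch reconstructing the ALL_RFO value, with bit positions taken from the offcore-response request encoding for this generation (illustrative, not authoritative):

    /* Composing MSRValue 0x3FFFC00122 used above. */
    #include <stdint.h>
    #include <stdio.h>

    #define REQ_DMND_RFO    (1ULL << 1)     /* demand RFO                */
    #define REQ_PF_L2_RFO   (1ULL << 5)     /* L2 hardware-prefetch RFO  */
    #define REQ_PF_LLC_RFO  (1ULL << 8)     /* LLC hardware-prefetch RFO */
    #define RSP_L3_MISS_ANY 0x3FFFC00000ULL /* L3 miss, any snoop reply  */

    int main(void)
    {
        uint64_t v = RSP_L3_MISS_ANY | REQ_DMND_RFO | REQ_PF_L2_RFO |
                     REQ_PF_LLC_RFO;
        printf("0x%llX\n", (unsigned long long)v); /* prints 0x3FFFC00122 */
        return 0;
    }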
index a4dcfce4a512f99aa1900d038fd5674dbce3cced..734d3873729e80481fc173026e13189f314528f1 100644 (file)
@@ -1,7 +1,6 @@
 [
     {
         "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. INST_RETIRED.ANY is counted by a designated fixed counter, leaving the programmable counters available for other events. Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 0",
         "UMask": "0x1",
         "Errata": "HSD140, HSD143",
@@ -12,7 +11,6 @@
     },
     {
         "PublicDescription": "This event counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 1",
         "UMask": "0x2",
         "EventName": "CPU_CLK_UNHALTED.THREAD",
@@ -21,7 +19,6 @@
         "CounterHTOff": "Fixed counter 1"
     },
     {
-        "EventCode": "0x00",
         "Counter": "Fixed counter 1",
         "UMask": "0x2",
         "AnyThread": "1",
@@ -32,7 +29,6 @@
     },
     {
         "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 2",
         "UMask": "0x3",
         "EventName": "CPU_CLK_UNHALTED.REF_TSC",
         "CounterHTOff": "1"
     },
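
The hunks above drop the bogus "EventCode": "0x00" lines: fixed counters have no event select and are enabled through IA32_FIXED_CTR_CTRL instead. From user space the three fixed events are reachable as perf's generic hardware events; a minimal IPC sketch, error handling omitted:

    /* The architectural fixed counters back perf's generic hardware
     * events, so no raw event code is needed. */
    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    static int open_hw(uint64_t config, int group_fd)
    {
        struct perf_event_attr attr = {0};
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = config;
        attr.exclude_kernel = 1;
        return (int)syscall(SYS_perf_event_open, &attr, 0, -1, group_fd, 0);
    }

    int main(void)
    {
        int cyc = open_hw(PERF_COUNT_HW_CPU_CYCLES, -1);    /* fixed ctr 1 */
        int ins = open_hw(PERF_COUNT_HW_INSTRUCTIONS, cyc); /* fixed ctr 0 */

        ioctl(cyc, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
        /* ... workload under test ... */
        uint64_t c = 0, i = 0;
        read(cyc, &c, sizeof(c));
        read(ins, &i, sizeof(i));
        printf("IPC ~= %.2f\n", c ? (double)i / (double)c : 0.0);
        return 0;
    }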
     {
-        "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+        "PEBS": "1",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
         "EventCode": "0xC0",
         "Counter": "0,1,2,3",
         "UMask": "0x2",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "Number of microcode assists invoked by HW upon uop writeback.",
+        "PEBS": "1",
+        "PublicDescription": "",
         "EventCode": "0xC1",
         "Counter": "0,1,2,3",
         "UMask": "0x40",
         "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "Data_LA": "1"
     },
     {
+        "PEBS": "1",
+        "PublicDescription": "",
         "EventCode": "0xC2",
         "Invert": "1",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "UOPS_RETIRED.STALL_CYCLES",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles without actually retired uops.",
+        "BriefDescription": "Cycles no executable uops retired",
         "CounterMask": "1",
         "CounterHTOff": "0,1,2,3"
     },
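
UOPS_RETIRED.STALL_CYCLES above shows how "Invert" and "CounterMask" combine: with cmask = 1 and inv = 1 the counter increments on cycles where fewer than one uop retires, i.e. retirement stall cycles. A sketch of the raw encoding this implies, with field offsets per the architectural PERFEVTSEL layout:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t perfevtsel(uint8_t event, uint8_t umask, int inv,
                               uint8_t cmask)
    {
        return (uint64_t)event | ((uint64_t)umask << 8) |
               ((uint64_t)!!inv << 23) | ((uint64_t)cmask << 24);
    }

    int main(void)
    {
        /* UOPS_RETIRED.STALL_CYCLES: event 0xC2, umask 0x1, inv=1, cmask=1 */
        printf("attr.config = %#llx\n",
               (unsigned long long)perfevtsel(0xC2, 0x1, 1, 1));
        return 0;
    }

The perf tool equivalent is cpu/event=0xc2,umask=0x1,inv=1,cmask=1/. With cmask raised to 10, as in TOTAL_CYCLES below, the inverted condition "fewer than 10 uops retired" holds on every cycle of a 4-wide retire machine, which is why its new BriefDescription calls it an always-true condition applied to the PEBS uops-retired event.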
     {
+        "PEBS": "1",
+        "PublicDescription": "",
         "EventCode": "0xC2",
         "Invert": "1",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles with less than 10 actually retired uops.",
+        "BriefDescription": "Number of cycles using always true condition applied to  PEBS uops retired event.",
         "CounterMask": "10",
         "CounterHTOff": "0,1,2,3"
     },
     {
+        "PEBS": "1",
+        "PublicDescription": "",
         "EventCode": "0xC2",
         "Invert": "1",
         "Counter": "0,1,2,3",
         "AnyThread": "1",
         "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles without actually retired uops.",
+        "BriefDescription": "Cycles no executable uops retired on core",
         "CounterMask": "1",
         "CounterHTOff": "0,1,2,3"
     },
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "Counts the number of not taken branch instructions retired.",
+        "PEBS": "1",
+        "PublicDescription": "",
         "EventCode": "0xC4",
         "Counter": "0,1,2,3",
         "UMask": "0x10",
         "EventName": "BR_INST_RETIRED.NOT_TAKEN",
         "SampleAfterValue": "400009",
-        "BriefDescription": "Not taken branch instructions retired.",
+        "BriefDescription": "Counts all not taken macro branch instructions retired.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "Number of far branches retired.",
+        "PEBS": "1",
+        "PublicDescription": "",
         "EventCode": "0xC4",
         "Counter": "0,1,2,3",
         "UMask": "0x40",
         "EventName": "BR_INST_RETIRED.FAR_BRANCH",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Far branch instructions retired.",
+        "BriefDescription": "Counts the number of far branch instructions retired.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
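
Several entries above gain "PEBS": "1", marking them usable with precise (low-skid) sampling. With perf_event_open that is requested through attr.precise_ip, the perf tool's :p/:pp modifiers. A hedged sketch for the far-branch event just updated:

    /* Precise sampling of BR_INST_RETIRED.FAR_BRANCH (event 0xC4,
     * umask 0x40), now flagged "PEBS": "1" above. */
    #include <linux/perf_event.h>
    #include <string.h>

    void setup_far_branch_sampling(struct perf_event_attr *attr)
    {
        memset(attr, 0, sizeof(*attr));
        attr->size = sizeof(*attr);
        attr->type = PERF_TYPE_RAW;
        attr->config = 0x40C4;        /* umask 0x40 << 8 | event 0xC4 */
        attr->sample_period = 100003; /* SampleAfterValue above       */
        attr->precise_ip = 2;         /* PEBS-based, low-skid IP      */
        attr->sample_type = PERF_SAMPLE_IP;
    }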
     {
index b2fbd617306acd693d8651535cd2d70ad09899c2..a9e62d4357af06352bff37bebf6460a8cfb8c2e3 100644 (file)
     },
     {
         "EventCode": "0x24",
-        "UMask": "0x41",
+        "UMask": "0xc1",
         "BriefDescription": "Demand Data Read requests that hit L2 cache",
         "Counter": "0,1,2,3",
         "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
         "Errata": "HSD78",
-        "PublicDescription": "Demand data read requests that hit L2 cache.",
+        "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
         "SampleAfterValue": "200003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
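
The umask fix above (0x41 -> 0xc1) changes the raw encoding users must request; a sketch of the corrected value in perf's rUUEE shorthand:

    /* Raw encoding of the corrected L2_RQSTS.DEMAND_DATA_RD_HIT
     * (event 0x24, umask 0xc1). */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t config = (0xc1ULL << 8) | 0x24; /* umask << 8 | event */
        printf("perf stat -e r%llx\n", (unsigned long long)config); /* rc124 */
        return 0;
    }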
     {
         "EventCode": "0x24",
-        "UMask": "0x42",
+        "UMask": "0xc2",
         "BriefDescription": "RFO requests that hit L2 cache",
         "Counter": "0,1,2,3",
         "EventName": "L2_RQSTS.RFO_HIT",
@@ -85,7 +85,7 @@
     },
     {
         "EventCode": "0x24",
-        "UMask": "0x44",
+        "UMask": "0xc4",
         "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
         "Counter": "0,1,2,3",
         "EventName": "L2_RQSTS.CODE_RD_HIT",
@@ -95,7 +95,7 @@
     },
     {
         "EventCode": "0x24",
-        "UMask": "0x50",
+        "UMask": "0xd0",
         "BriefDescription": "L2 prefetch requests that hit L2 cache",
         "Counter": "0,1,2,3",
         "EventName": "L2_RQSTS.L2_PF_HIT",
     {
         "EventCode": "0xD0",
         "UMask": "0x11",
-        "BriefDescription": "Retired load uops that miss the STLB. (precise Event)",
+        "BriefDescription": "Retired load uops that miss the STLB.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
     {
         "EventCode": "0xD0",
         "UMask": "0x12",
-        "BriefDescription": "Retired store uops that miss the STLB. (precise Event)",
+        "BriefDescription": "Retired store uops that miss the STLB.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
     {
         "EventCode": "0xD0",
         "UMask": "0x21",
-        "BriefDescription": "Retired load uops with locked access. (precise Event)",
+        "BriefDescription": "Retired load uops with locked access.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
     {
         "EventCode": "0xD0",
         "UMask": "0x41",
-        "BriefDescription": "Retired load uops that split across a cacheline boundary. (precise Event)",
+        "BriefDescription": "Retired load uops that split across a cacheline boundary.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
         "Errata": "HSD29, HSM30",
-        "PublicDescription": "This event counts load uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD0",
         "UMask": "0x42",
-        "BriefDescription": "Retired store uops that split across a cacheline boundary. (precise Event)",
+        "BriefDescription": "Retired store uops that split across a cacheline boundary.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
         "Errata": "HSD29, HSM30",
         "L1_Hit_Indication": "1",
-        "PublicDescription": "This event counts store uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD0",
         "UMask": "0x81",
-        "BriefDescription": "All retired load uops. (precise Event)",
+        "BriefDescription": "All retired load uops.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
     {
         "EventCode": "0xD0",
         "UMask": "0x82",
-        "BriefDescription": "All retired store uops. (precise Event)",
+        "BriefDescription": "All retired store uops.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
         "Errata": "HSD29, HSM30",
         "L1_Hit_Indication": "1",
-        "PublicDescription": "This event counts all store uops retired. This is a precise event.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD1",
         "UMask": "0x4",
-        "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+        "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
         "Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
-        "PublicDescription": "This event counts retired load uops in which data sources were data hits in the L3 cache without snoops required. This does not include hardware prefetches. This is a precise event.",
+        "PublicDescription": "Retired load uops with L3 cache hits as data sources.",
         "SampleAfterValue": "50021",
         "CounterHTOff": "0,1,2,3"
     },
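
"Data_LA": "1" on the load events in this file means each PEBS record can carry the load's data linear address; user space opts in with PERF_SAMPLE_ADDR, which is what perf mem builds on. A sketch for the L3-hit event just rewritten:

    /* Sample MEM_LOAD_UOPS_RETIRED.L3_HIT (event 0xD1, umask 0x4) with
     * the data linear address included in each sample. */
    #include <linux/perf_event.h>
    #include <string.h>

    void setup_l3_hit_sampling(struct perf_event_attr *attr)
    {
        memset(attr, 0, sizeof(*attr));
        attr->size = sizeof(*attr);
        attr->type = PERF_TYPE_RAW;
        attr->config = 0x04D1;       /* umask 0x4 << 8 | event 0xD1 */
        attr->sample_period = 50021; /* SampleAfterValue above      */
        attr->precise_ip = 2;        /* PEBS                        */
        attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
    }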
         "Counter": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
         "Errata": "HSM30",
-        "PublicDescription": "This event counts retired load uops in which data sources missed in the L1 cache. This does not include hardware prefetches. This is a precise event.",
+        "PublicDescription": "Retired load uops missed L1 cache as data sources.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD1",
         "UMask": "0x10",
-        "BriefDescription": "Retired load uops with L2 cache misses as data sources.",
+        "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
         "Errata": "HSD29, HSM30",
+        "PublicDescription": "Retired load uops missed L2. Unknown data source excluded.",
         "SampleAfterValue": "50021",
         "CounterHTOff": "0,1,2,3"
     },
         "Counter": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
         "Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
+        "PublicDescription": "Retired load uops missed L3. Excludes unknown data source .",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD2",
         "UMask": "0x2",
-        "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. ",
+        "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
         "Errata": "HSD29, HSD25, HSM26, HSM30",
-        "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HIT in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.",
         "SampleAfterValue": "20011",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD2",
         "UMask": "0x4",
-        "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. ",
+        "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
         "Errata": "HSD29, HSD25, HSM26, HSM30",
-        "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HITM (hit modified) in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.",
         "SampleAfterValue": "20011",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD3",
         "UMask": "0x1",
+        "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
         "Errata": "HSD74, HSD29, HSD25, HSM30",
-        "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
+        "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xD3",
         "UMask": "0x4",
-        "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI) (Precise Event)",
+        "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
     {
         "EventCode": "0xD3",
         "UMask": "0x10",
-        "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM (Precise Event)",
+        "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
     {
         "EventCode": "0xD3",
         "UMask": "0x20",
-        "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache (Precise Event)",
+        "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
         "Data_LA": "1",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "BriefDescription": "Split locks in SQ",
         "Counter": "0,1,2,3",
         "EventName": "SQ_MISC.SPLIT_LOCK",
-        "PublicDescription": "",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
-        "MSRValue": "0x04003c0001",
+        "BriefDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+        "MSRValue": "0x04003C0001",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
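
The LLC_HIT variants here stack supplier and snoop bits on top of the request bits. Decomposing the 0x04003C0001 value from the entry above, with bit names per the offcore-response encoding for this generation (illustrative, not authoritative):

    /* Composing MSRValue 0x04003C0001 used above. */
    #include <stdint.h>
    #include <stdio.h>

    #define REQ_DMND_DATA_RD (1ULL << 0)    /* demand data read         */
    #define SUPP_L3_HIT_MESF (0xFULL << 18) /* L3 hit in M/E/S/F state  */
    #define SNOOP_HIT_NO_FWD (1ULL << 34)   /* snoop hit, no forwarding */

    int main(void)
    {
        uint64_t v = REQ_DMND_DATA_RD | SUPP_L3_HIT_MESF | SNOOP_HIT_NO_FWD;
        printf("0x%010llX\n", (unsigned long long)v); /* 0x04003C0001 */
        return 0;
    }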
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
-        "MSRValue": "0x10003c0001",
+        "BriefDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+        "MSRValue": "0x10003C0001",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
-        "MSRValue": "0x04003c0002",
+        "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+        "MSRValue": "0x04003C0002",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
-        "MSRValue": "0x10003c0002",
+        "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+        "MSRValue": "0x10003C0002",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
-        "MSRValue": "0x04003c0004",
+        "BriefDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+        "MSRValue": "0x04003C0004",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
-        "MSRValue": "0x10003c0004",
+        "BriefDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+        "MSRValue": "0x10003C0004",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3",
-        "MSRValue": "0x3f803c0010",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
+        "MSRValue": "0x3F803C0010",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3",
-        "MSRValue": "0x3f803c0020",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
+        "MSRValue": "0x3F803C0020",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3",
-        "MSRValue": "0x3f803c0040",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
+        "MSRValue": "0x3F803C0040",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3",
-        "MSRValue": "0x3f803c0080",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
+        "MSRValue": "0x3F803C0080",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3",
-        "MSRValue": "0x3f803c0100",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
+        "MSRValue": "0x3F803C0100",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3",
-        "MSRValue": "0x3f803c0200",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
+        "MSRValue": "0x3F803C0200",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
-        "MSRValue": "0x04003c0091",
+        "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+        "MSRValue": "0x04003C0091",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
-        "MSRValue": "0x10003c0091",
+        "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+        "MSRValue": "0x10003C0091",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
-        "MSRValue": "0x04003c0122",
+        "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+        "MSRValue": "0x04003C0122",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
-        "MSRValue": "0x10003c0122",
+        "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+        "MSRValue": "0x10003C0122",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
-        "MSRValue": "0x04003c0244",
+        "BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+        "MSRValue": "0x04003C0244",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
-        "MSRValue": "0x04003c07f7",
+        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+        "MSRValue": "0x04003C07F7",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
-        "MSRValue": "0x10003c07f7",
+        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+        "MSRValue": "0x10003C07F7",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all requests that hit in the L3",
-        "MSRValue": "0x3f803c8fff",
+        "BriefDescription": "Counts all requests hit in the L3",
+        "MSRValue": "0x3F803C8FFF",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all requests hit in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     }
index 5ab5c78fe5805ec8a0925abe925af7ba73aa5c50..e5aac148c9419ae76b61e9480132720d35dc8ff7 100644 (file)
 [
     {
-        "BriefDescription": "Instructions Per Cycle (per logical thread)",
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Frontend_Bound"
+    },
+    {
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Frontend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Bad_Speculation"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Bad_Speculation_SMT"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Backend_Bound"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Backend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Retiring"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Retiring_SMT"
+    },
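The four TopdownL1/TopdownL1_SMT metrics above implement the top-down level-1 slot breakdown: with 4 issue slots per core clock, slots are attributed to Frontend_Bound, Bad_Speculation and Retiring, and Backend_Bound is defined as the remainder. A minimal sketch of the non-SMT variants in Python, using hypothetical counter values; the names mirror the MetricExpr fields, and "cycles" stands for CPU_CLK_UNHALTED.THREAD:

    # Hypothetical raw counts for one logical CPU with SMT disabled.
    c = {
        "IDQ_UOPS_NOT_DELIVERED.CORE": 1.2e9,
        "UOPS_ISSUED.ANY": 3.5e9,
        "UOPS_RETIRED.RETIRE_SLOTS": 3.0e9,
        "INT_MISC.RECOVERY_CYCLES": 0.1e9,
        "cycles": 2.0e9,  # CPU_CLK_UNHALTED.THREAD
    }
    slots = 4 * c["cycles"]  # cf. the SLOTS metric below
    frontend_bound = c["IDQ_UOPS_NOT_DELIVERED.CORE"] / slots
    bad_speculation = (c["UOPS_ISSUED.ANY"] - c["UOPS_RETIRED.RETIRE_SLOTS"]
                       + 4 * c["INT_MISC.RECOVERY_CYCLES"]) / slots
    retiring = c["UOPS_RETIRED.RETIRE_SLOTS"] / slots
    backend_bound = 1 - (frontend_bound + bad_speculation + retiring)
    # 0.15, 0.1125, 0.375, 0.3625 -- the fractions sum to 1.0 by
    # construction of the Backend_Bound expression.

Once a metrics file like this is installed, the entries should be selectable by MetricName, e.g. perf stat -M Frontend_Bound.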
+    {
         "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Instructions Per Cycle (per logical thread)",
         "MetricGroup": "TopDownL1",
         "MetricName": "IPC"
     },
     {
-        "BriefDescription": "Uops Per Instruction",
         "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
-        "MetricGroup": "Pipeline",
+        "BriefDescription": "Uops Per Instruction",
+        "MetricGroup": "Pipeline;Retiring",
         "MetricName": "UPI"
     },
     {
-        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+        "BriefDescription": "Instruction per taken branch",
+        "MetricGroup": "Branches;PGO",
+        "MetricName": "IpTB"
+    },
+    {
+        "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+        "BriefDescription": "Branch instructions per taken branch. ",
+        "MetricGroup": "Branches;PGO",
+        "MetricName": "BpTB"
+    },
+    {
         "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
-        "MetricGroup": "Frontend",
+        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+        "MetricGroup": "PGO",
         "MetricName": "IFetch_Line_Utilization"
     },
     {
-        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
-        "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
-        "MetricGroup": "DSB; Frontend_Bandwidth",
+        "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+        "MetricGroup": "DSB;Frontend_Bandwidth",
         "MetricName": "DSB_Coverage"
     },
     {
-        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricGroup": "Pipeline;Summary",
         "MetricName": "CPI"
     },
     {
-        "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
         "MetricGroup": "Summary",
         "MetricName": "CLKS"
     },
     {
-        "BriefDescription": "Total issue-pipeline slots",
-        "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+        "MetricExpr": "4 * cycles",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
         "MetricGroup": "TopDownL1",
         "MetricName": "SLOTS"
     },
     {
-        "BriefDescription": "Total number of retired Instructions",
+        "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
+        "MetricGroup": "TopDownL1_SMT",
+        "MetricName": "SLOTS_SMT"
+    },
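Every *_SMT variant divides by the same core-clock estimate that SLOTS_SMT spells out: ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ). Read as written, it halves the per-thread clock count and then scales back up for the fraction of time this thread ran alone. A sketch of that shared subterm (an interpretation of the formula, not documented semantics):

    def core_clks_smt(thread_clks, one_thread_active, ref_xclk):
        # Estimate physical-core clocks from one logical CPU's counters
        # when SMT is enabled; SLOTS_SMT above is 4 * this value.
        return (thread_clks / 2) * (1 + one_thread_active / ref_xclk)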
+    {
+        "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+        "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+        "MetricGroup": "Instruction_Type;L1_Bound",
+        "MetricName": "IpL"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+        "BriefDescription": "Instructions per Store",
+        "MetricGroup": "Instruction_Type;Store_Bound",
+        "MetricName": "IpS"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Instructions per Branch",
+        "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+        "MetricName": "IpB"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+        "BriefDescription": "Instruction per (near) call",
+        "MetricGroup": "Branches",
+        "MetricName": "IpCall"
+    },
+    {
         "MetricExpr": "INST_RETIRED.ANY",
+        "BriefDescription": "Total number of retired Instructions",
         "MetricGroup": "Summary",
         "MetricName": "Instructions"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / cycles",
         "BriefDescription": "Instructions Per Cycle (per physical core)",
-        "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
         "MetricGroup": "SMT",
         "MetricName": "CoreIPC"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Instructions Per Cycle (per physical core)",
+        "MetricGroup": "SMT",
+        "MetricName": "CoreIPC_SMT"
+    },
+    {
+        "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
         "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
-        "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
         "MetricGroup": "Pipeline;Ports_Utilization",
         "MetricName": "ILP"
     },
     {
-        "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
-        "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL  - (( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION )) ) / RS_EVENTS.EMPTY_END)",
-        "MetricGroup": "Unknown_Branches",
-        "MetricName": "BAClear_Cost"
+        "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+        "MetricGroup": "Branch_Mispredicts",
+        "MetricName": "IpMispredict"
     },
     {
+        "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
         "BriefDescription": "Core actual clocks when any thread is active on the physical core",
-        "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
         "MetricGroup": "SMT",
         "MetricName": "CORE_CLKS"
     },
     {
-        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
         "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
         "MetricGroup": "Memory_Bound;Memory_Lat",
         "MetricName": "Load_Miss_Real_Latency"
     },
     {
-        "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
-        "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+        "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+        "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
         "MetricGroup": "Memory_Bound;Memory_BW",
         "MetricName": "MLP"
     },
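Load_Miss_Real_Latency and MLP are both occupancy ratios over the L1D_PEND_MISS counters: total pending-miss occupancy divided by completed misses gives the average miss latency in core cycles, and divided by miss-pending cycles gives the average number of overlapping misses. A sketch, assuming the counter semantics stated in the descriptions:

    def load_miss_real_latency(pending, l1_miss, hit_lfb):
        # L1D_PEND_MISS.PENDING accumulates outstanding demand misses each
        # cycle; completed misses include loads that hit an in-flight
        # line-fill buffer (mem_load_uops_retired.hit_lfb).
        return pending / (l1_miss + hit_lfb)

    def mlp(pending, pending_cycles):
        # Average in-flight L1 demand misses over cycles with at least one
        # miss pending (L1D_PEND_MISS.PENDING_CYCLES).
        return pending / pending_cycles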
     {
+        "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
         "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
-        "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
         "MetricGroup": "TLB",
         "MetricName": "Page_Walks_Utilization"
     },
     {
-        "BriefDescription": "Average CPU Utilization",
+        "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+        "MetricGroup": "TLB_SMT",
+        "MetricName": "Page_Walks_Utilization_SMT"
+    },
+    {
+        "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+        "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L1D_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+        "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L2_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+        "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L3_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L1MPKI"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2MPKI"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2MPKI_All"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2HPKI_All"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L3MPKI"
+    },
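The Memory_BW fill metrics above all share one shape, 64 bytes per cache line times line-fill events, scaled by wall-clock duration_time into GB/s, while the *MPKI metrics normalize miss counts per thousand retired instructions. A sketch of both conversions with hypothetical counts:

    CACHE_LINE_BYTES = 64  # as hard-coded in the MetricExpr fields above

    def fill_bw_gb_s(lines_filled, seconds):
        # e.g. lines_filled = L2_LINES_IN.ALL for L2_Cache_Fill_BW
        return CACHE_LINE_BYTES * lines_filled / 1e9 / seconds

    def mpki(misses, instructions):
        # e.g. misses = MEM_LOAD_UOPS_RETIRED.L1_MISS for L1MPKI
        return 1000 * misses / instructions

    print(fill_bw_gb_s(5e8, 1.0))  # 32.0 GB/s
    print(mpki(2e7, 4e9))          # 5.0 misses per kilo instruction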
+    {
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+        "BriefDescription": "Average CPU Utilization",
         "MetricGroup": "Summary",
         "MetricName": "CPU_Utilization"
     },
     {
-        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricGroup": "Power",
         "MetricName": "Turbo_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricGroup": "SMT;Summary",
         "MetricName": "SMT_2T_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
     {
-        "BriefDescription": "C3 residency percent per core",
+        "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+        "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "DRAM_BW_Use"
+    },
+    {
+        "MetricExpr": "1000000000 * ( cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x35\\,umask\\=0x3\\,filter_opc\\=0x182@ ) / ( cbox_0@event\\=0x0@ / duration_time )",
+        "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
+        "MetricGroup": "Memory_Lat",
+        "MetricName": "DRAM_Read_Latency"
+    },
+    {
+        "MetricExpr": "cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182\\,thresh\\=1@",
+        "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "DRAM_Parallel_Reads"
+    },
+    {
+        "MetricExpr": "cbox_0@event\\=0x0@",
+        "BriefDescription": "Socket actual clocks when any core is active on that socket",
+        "MetricGroup": "",
+        "MetricName": "Socket_CLKS"
+    },
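DRAM_Read_Latency divides TOR occupancy (cbox event 0x36) by TOR inserts (event 0x35), both filtered to data reads via filter_opc=0x182, giving the average residency of a read request in uncore clocks; it then converts clocks to nanoseconds using the uncore frequency derived from Socket_CLKS over duration_time. A sketch of that arithmetic (an interpretation of the expression as written):

    def dram_read_latency_ns(tor_occupancy, tor_inserts, socket_clks, seconds):
        cycles_per_request = tor_occupancy / tor_inserts  # Little's-law-style average
        uncore_hz = socket_clks / seconds                 # Socket_CLKS / duration_time
        return 1e9 * cycles_per_request / uncore_hz

DRAM_Parallel_Reads is the matching occupancy-over-active-cycles ratio, using thresh=1 so the denominator counts only cycles with at least one read in flight.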
+    {
         "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per core",
         "MetricName": "C3_Core_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per core",
         "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per core",
         "MetricName": "C6_Core_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per core",
         "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per core",
         "MetricName": "C7_Core_Residency"
     },
     {
-        "BriefDescription": "C2 residency percent per package",
         "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C2 residency percent per package",
         "MetricName": "C2_Pkg_Residency"
     },
     {
-        "BriefDescription": "C3 residency percent per package",
         "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per package",
         "MetricName": "C3_Pkg_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per package",
         "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per package",
         "MetricName": "C6_Pkg_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per package",
         "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per package",
         "MetricName": "C7_Pkg_Residency"
     }
 ]
index 56b0f24b8029bf705efb3690244c411c9d2d6f36..a42d5ce86b6f4ae4358f9c2fc69054ec63f2197a 100644 (file)
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Loads with latency value being above 4.",
+        "BriefDescription": "Randomly selected loads with latency value being above 4.",
         "PEBS": "2",
         "MSRValue": "0x4",
         "Counter": "3",
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Loads with latency value being above 8.",
+        "BriefDescription": "Randomly selected loads with latency value being above 8.",
         "PEBS": "2",
         "MSRValue": "0x8",
         "Counter": "3",
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Loads with latency value being above 16.",
+        "BriefDescription": "Randomly selected loads with latency value being above 16.",
         "PEBS": "2",
         "MSRValue": "0x10",
         "Counter": "3",
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Loads with latency value being above 32.",
+        "BriefDescription": "Randomly selected loads with latency value being above 32.",
         "PEBS": "2",
         "MSRValue": "0x20",
         "Counter": "3",
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Loads with latency value being above 64.",
+        "BriefDescription": "Randomly selected loads with latency value being above 64.",
         "PEBS": "2",
         "MSRValue": "0x40",
         "Counter": "3",
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Loads with latency value being above 128.",
+        "BriefDescription": "Randomly selected loads with latency value being above 128.",
         "PEBS": "2",
         "MSRValue": "0x80",
         "Counter": "3",
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Loads with latency value being above 256.",
+        "BriefDescription": "Randomly selected loads with latency value being above 256.",
         "PEBS": "2",
         "MSRValue": "0x100",
         "Counter": "3",
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Loads with latency value being above 512.",
+        "BriefDescription": "Randomly selected loads with latency value being above 512.",
         "PEBS": "2",
         "MSRValue": "0x200",
         "Counter": "3",
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts demand data reads that miss in the L3",
-        "MSRValue": "0x3fbfc00001",
+        "BriefDescription": "Counts demand data reads miss in the L3",
+        "MSRValue": "0x3FBFC00001",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts demand data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram",
+        "BriefDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
         "MSRValue": "0x0600400001",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3",
-        "MSRValue": "0x3fbfc00002",
+        "BriefDescription": "Counts all demand data writes (RFOs) miss in the L3",
+        "MSRValue": "0x3FBFC00002",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs) miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram",
+        "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
         "MSRValue": "0x0600400002",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.LOCAL_DRAM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache",
-        "MSRValue": "0x103fc00002",
+        "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
+        "MSRValue": "0x103FC00002",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand code reads that miss in the L3",
-        "MSRValue": "0x3fbfc00004",
+        "BriefDescription": "Counts all demand code reads miss in the L3",
+        "MSRValue": "0x3FBFC00004",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram",
+        "BriefDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
         "MSRValue": "0x0600400004",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3",
-        "MSRValue": "0x3fbfc00010",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
+        "MSRValue": "0x3FBFC00010",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3",
-        "MSRValue": "0x3fbfc00020",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
+        "MSRValue": "0x3FBFC00020",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3",
-        "MSRValue": "0x3fbfc00040",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
+        "MSRValue": "0x3FBFC00040",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3",
-        "MSRValue": "0x3fbfc00080",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
+        "MSRValue": "0x3FBFC00080",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3",
-        "MSRValue": "0x3fbfc00100",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
+        "MSRValue": "0x3FBFC00100",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3",
-        "MSRValue": "0x3fbfc00200",
+        "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
+        "MSRValue": "0x3FBFC00200",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that miss in the L3",
-        "MSRValue": "0x3fbfc00091",
+        "BriefDescription": "Counts all demand & prefetch data reads miss in the L3",
+        "MSRValue": "0x3FBFC00091",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram",
+        "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
         "MSRValue": "0x0600400091",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram",
-        "MSRValue": "0x063f800091",
+        "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
+        "MSRValue": "0x063F800091",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache",
-        "MSRValue": "0x103fc00091",
+        "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
+        "MSRValue": "0x103FC00091",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache",
-        "MSRValue": "0x083fc00091",
+        "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
+        "MSRValue": "0x083FC00091",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3",
-        "MSRValue": "0x3fbfc00122",
+        "BriefDescription": "Counts all demand & prefetch RFOs miss in the L3",
+        "MSRValue": "0x3FBFC00122",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram",
+        "BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
         "MSRValue": "0x0600400122",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch code reads that miss in the L3",
-        "MSRValue": "0x3fbfc00244",
+        "BriefDescription": "Counts all demand & prefetch code reads miss in the L3",
+        "MSRValue": "0x3FBFC00244",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch code reads miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram",
+        "BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
         "MSRValue": "0x0600400244",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3",
-        "MSRValue": "0x3fbfc007f7",
+        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
+        "MSRValue": "0x3FBFC007F7",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram",
-        "MSRValue": "0x06004007f7",
+        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
+        "MSRValue": "0x06004007F7",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram",
-        "MSRValue": "0x063f8007f7",
+        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
+        "MSRValue": "0x063F8007F7",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache",
-        "MSRValue": "0x103fc007f7",
+        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
+        "MSRValue": "0x103FC007F7",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache",
-        "MSRValue": "0x083fc007f7",
+        "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
+        "MSRValue": "0x083FC007F7",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all requests that miss in the L3",
-        "MSRValue": "0x3fbfc08fff",
+        "BriefDescription": "Counts all requests miss in the L3",
+        "MSRValue": "0x3FBFC08FFF",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts all requests miss in the L3",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     }
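All of the OFFCORE_RESPONSE events above share event code 0xB7 or 0xBB with umask 0x1; what distinguishes them is the MSRValue programmed into the offcore response MSRs (MSRIndex 0x1a6/0x1a7). By Intel SDM convention the low bits of that mask select the request type and the upper bits select response/supplier/snoop attributes, which is why every DEMAND_DATA_RD variant here ends in ...1. A small sketch of that split (bit positions are an assumption from the SDM layout, not taken from this file):

    # Hypothetical decode of OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE.
    DEMAND_DATA_RD = 1 << 0      # assumed request-type bit 0
    msr = 0x3FBFC00001           # MSRValue from the entry above
    assert msr & DEMAND_DATA_RD  # low bits: request type
    print(hex(msr >> 16))        # upper bits: response/supplier/snoop selection

On kernels that expose the offcore_rsp format term, the same mask can be passed on the command line, e.g. perf stat -e cpu/event=0xb7,umask=0x1,offcore_rsp=0x3FBFC00001/.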
index 8a18bfe9e3e4dd2806010f4039946c859206bdd8..26f2888341ee03edd0024bddcbd5781a239bf5ce 100644 (file)
@@ -1,6 +1,5 @@
 [
     {
-        "EventCode": "0x00",
         "UMask": "0x1",
         "BriefDescription": "Instructions retired from execution.",
         "Counter": "Fixed counter 0",
@@ -11,7 +10,6 @@
         "CounterHTOff": "Fixed counter 0"
     },
     {
-        "EventCode": "0x00",
         "UMask": "0x2",
         "BriefDescription": "Core cycles when the thread is not in halt state.",
         "Counter": "Fixed counter 1",
@@ -21,7 +19,6 @@
         "CounterHTOff": "Fixed counter 1"
     },
     {
-        "EventCode": "0x00",
         "UMask": "0x2",
         "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
         "Counter": "Fixed counter 1",
@@ -31,7 +28,6 @@
         "CounterHTOff": "Fixed counter 1"
     },
     {
-        "EventCode": "0x00",
         "UMask": "0x3",
         "BriefDescription": "Reference cycles when the core is not in halt state.",
         "Counter": "Fixed counter 2",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "UOPS_RETIRED.ALL",
+        "PublicDescription": "Counts the number of micro-ops retired. Use Cmask=1 and invert to count active cycles or stalled cycles.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+        "PublicDescription": "This event counts the number of retirement slots used each cycle.  There are potentially 4 slots that can be used each cycle - meaning, 4 uops or 4 instructions could retire each cycle.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "BR_INST_RETIRED.CONDITIONAL",
+        "PublicDescription": "Counts the number of conditional branch instructions retired.",
         "SampleAfterValue": "400009",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+        "PublicDescription": "Counts the number of near return instructions retired.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+        "PublicDescription": "Number of near taken branches retired.",
         "SampleAfterValue": "400009",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+        "PublicDescription": "Number of near branch instructions retired that were taken but mispredicted.",
         "SampleAfterValue": "400009",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
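The UOPS_RETIRED descriptions above suggest two derived measurements: UOPS_RETIRED.ALL counted with Cmask=1 gives cycles in which at least one uop retired (inverted, cycles in which none did), and RETIRE_SLOTS over the 4 available slots per cycle gives retirement-slot utilization. A sketch of both, assuming counts collected with the usual cmask/inv event modifiers:

    def retire_stats(retire_slots, cycles, active_cycles):
        # active_cycles: UOPS_RETIRED.ALL counted with cmask=1;
        # the inverted count (cmask=1,inv=1) is the complement below.
        stalled_cycles = cycles - active_cycles
        slot_utilization = retire_slots / (4 * cycles)  # cf. the Retiring metric
        return stalled_cycles, slot_utilization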
index 999a01bc64670de135fc37e82ecc0f4494cf22bf..5f6cb2abc3840162de9ba0db0ee25eadbe60eab9 100644 (file)
         "EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address ",
+        "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand data reads ",
+        "BriefDescription": "Counts all demand data reads",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand rfo's ",
+        "BriefDescription": "Counts all demand rfo's",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all demand & prefetch prefetch RFOs ",
+        "BriefDescription": "Counts all demand & prefetch prefetch RFOs",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts all data/code/rfo references (demand & prefetch) ",
+        "BriefDescription": "Counts all data/code/rfo references (demand & prefetch)",
         "CounterHTOff": "0,1,2,3"
     }
 ]
\ No newline at end of file
index 7c2679514efb40559aea735843b099c3d6eec2d5..bc4d5fc284a0001096754ad12f98fb62cead1dae 100644 (file)
 [
     {
-        "BriefDescription": "Instructions Per Cycle (per logical thread)",
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Frontend_Bound"
+    },
+    {
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Frontend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Bad_Speculation"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Bad_Speculation_SMT"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Backend_Bound"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Backend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Retiring"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Retiring_SMT"
+    },
+    {
         "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Instructions Per Cycle (per logical thread)",
         "MetricGroup": "TopDownL1",
         "MetricName": "IPC"
     },
     {
-        "BriefDescription": "Uops Per Instruction",
         "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
-        "MetricGroup": "Pipeline",
+        "BriefDescription": "Uops Per Instruction",
+        "MetricGroup": "Pipeline;Retiring",
         "MetricName": "UPI"
     },
     {
-        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
-        "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
-        "MetricGroup": "Frontend",
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+        "BriefDescription": "Instruction per taken branch",
+        "MetricGroup": "Branches;PGO",
+        "MetricName": "IpTB"
+    },
+    {
+        "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+        "BriefDescription": "Branch instructions per taken branch. ",
+        "MetricGroup": "Branches;PGO",
+        "MetricName": "BpTB"
+    },
+    {
+        "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
+        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+        "MetricGroup": "PGO",
         "MetricName": "IFetch_Line_Utilization"
     },
     {
-        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
-        "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
-        "MetricGroup": "DSB; Frontend_Bandwidth",
+        "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+        "MetricGroup": "DSB;Frontend_Bandwidth",
         "MetricName": "DSB_Coverage"
     },
     {
-        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricGroup": "Pipeline;Summary",
         "MetricName": "CPI"
     },
     {
-        "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
         "MetricGroup": "Summary",
         "MetricName": "CLKS"
     },
     {
-        "BriefDescription": "Total issue-pipeline slots",
-        "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+        "MetricExpr": "4 * cycles",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
         "MetricGroup": "TopDownL1",
         "MetricName": "SLOTS"
     },
     {
-        "BriefDescription": "Total number of retired Instructions",
+        "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
+        "MetricGroup": "TopDownL1_SMT",
+        "MetricName": "SLOTS_SMT"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+        "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+        "MetricGroup": "Instruction_Type;L1_Bound",
+        "MetricName": "IpL"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+        "BriefDescription": "Instructions per Store",
+        "MetricGroup": "Instruction_Type;Store_Bound",
+        "MetricName": "IpS"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Instructions per Branch",
+        "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+        "MetricName": "IpB"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+        "BriefDescription": "Instruction per (near) call",
+        "MetricGroup": "Branches",
+        "MetricName": "IpCall"
+    },
+    {
         "MetricExpr": "INST_RETIRED.ANY",
+        "BriefDescription": "Total number of retired Instructions",
         "MetricGroup": "Summary",
         "MetricName": "Instructions"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / cycles",
         "BriefDescription": "Instructions Per Cycle (per physical core)",
-        "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
         "MetricGroup": "SMT",
         "MetricName": "CoreIPC"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Instructions Per Cycle (per physical core)",
+        "MetricGroup": "SMT",
+        "MetricName": "CoreIPC_SMT"
+    },
+    {
+        "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
+        "BriefDescription": "Floating Point Operations Per Cycle",
+        "MetricGroup": "FLOPS",
+        "MetricName": "FLOPc"
+    },
+    {
+        "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Floating Point Operations Per Cycle",
+        "MetricGroup": "FLOPS_SMT",
+        "MetricName": "FLOPc_SMT"
+    },
+    {
+        "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
         "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
-        "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
         "MetricGroup": "Pipeline;Ports_Utilization",
         "MetricName": "ILP"
     },
     {
-        "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
-        "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFETCH_STALL ) / RS_EVENTS.EMPTY_END)",
-        "MetricGroup": "Unknown_Branches",
-        "MetricName": "BAClear_Cost"
+        "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+        "MetricGroup": "Branch_Mispredicts",
+        "MetricName": "IpMispredict"
     },
     {
+        "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
         "BriefDescription": "Core actual clocks when any thread is active on the physical core",
-        "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
         "MetricGroup": "SMT",
         "MetricName": "CORE_CLKS"
     },
     {
-        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
         "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
         "MetricGroup": "Memory_Bound;Memory_Lat",
         "MetricName": "Load_Miss_Real_Latency"
     },
     {
-        "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
-        "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+        "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+        "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
         "MetricGroup": "Memory_Bound;Memory_BW",
         "MetricName": "MLP"
     },
     {
+        "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
         "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
-        "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
         "MetricGroup": "TLB",
         "MetricName": "Page_Walks_Utilization"
     },
     {
-        "BriefDescription": "Average CPU Utilization",
+        "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+        "MetricGroup": "TLB_SMT",
+        "MetricName": "Page_Walks_Utilization_SMT"
+    },
+    {
+        "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+        "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L1D_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+        "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L2_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+        "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L3_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L1MPKI"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2MPKI"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2MPKI_All"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2HPKI_All"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L3MPKI"
+    },
+    {
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+        "BriefDescription": "Average CPU Utilization",
         "MetricGroup": "Summary",
         "MetricName": "CPU_Utilization"
     },
     {
+        "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
         "BriefDescription": "Giga Floating Point Operations Per Second",
-        "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
         "MetricGroup": "FLOPS;Summary",
         "MetricName": "GFLOPs"
     },
     {
-        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricGroup": "Power",
         "MetricName": "Turbo_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricGroup": "SMT;Summary",
         "MetricName": "SMT_2T_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
     {
-        "BriefDescription": "C3 residency percent per core",
+        "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
+        "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "DRAM_BW_Use"
+    },
+    {
         "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per core",
         "MetricName": "C3_Core_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per core",
         "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per core",
         "MetricName": "C6_Core_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per core",
         "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per core",
         "MetricName": "C7_Core_Residency"
     },
     {
-        "BriefDescription": "C2 residency percent per package",
         "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C2 residency percent per package",
         "MetricName": "C2_Pkg_Residency"
     },
     {
-        "BriefDescription": "C3 residency percent per package",
         "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per package",
         "MetricName": "C3_Pkg_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per package",
         "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per package",
         "MetricName": "C6_Pkg_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per package",
         "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per package",
         "MetricName": "C7_Pkg_Residency"
     }
 ]
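
(The four TopdownL1 metrics added above partition the machine's issue slots, SLOTS = 4 * cycles, into Frontend Bound, Bad Speculation, Retiring, and Backend Bound, with Backend Bound defined as the remainder. A worked sketch of the non-SMT expressions, using made-up counter values:)

    # Worked example of the non-SMT TopdownL1 MetricExprs above.
    # SLOTS = 4 * cycles is the total issue bandwidth; the four
    # fractions partition it and sum to 1. Counter values are made up.
    cycles = 1_000_000
    idq_uops_not_delivered_core = 400_000   # IDQ_UOPS_NOT_DELIVERED.CORE
    uops_issued_any = 3_200_000             # UOPS_ISSUED.ANY
    uops_retired_retire_slots = 3_000_000   # UOPS_RETIRED.RETIRE_SLOTS
    int_misc_recovery_cycles = 50_000       # INT_MISC.RECOVERY_CYCLES

    slots = 4 * cycles
    frontend_bound = idq_uops_not_delivered_core / slots
    bad_speculation = (uops_issued_any - uops_retired_retire_slots
                       + 4 * int_misc_recovery_cycles) / slots
    retiring = uops_retired_retire_slots / slots
    backend_bound = 1 - (frontend_bound + bad_speculation + retiring)

    print(f"Frontend {frontend_bound:.2%}  BadSpec {bad_speculation:.2%}  "
          f"Retiring {retiring:.2%}  Backend {backend_bound:.2%}")
    # Frontend 10.00%  BadSpec 10.00%  Retiring 75.00%  Backend 5.00%
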
index 0afbfd95ea306de86e4791359899d33171fb5ad0..2a0aad91d83d05e320e3cb57bfbd6e77614dc031 100644 (file)
@@ -1,6 +1,5 @@
 [
     {
-        "EventCode": "0x00",
         "Counter": "Fixed counter 0",
         "UMask": "0x1",
         "EventName": "INST_RETIRED.ANY",
@@ -9,7 +8,6 @@
         "CounterHTOff": "Fixed counter 0"
     },
     {
-        "EventCode": "0x00",
         "Counter": "Fixed counter 1",
         "UMask": "0x2",
         "EventName": "CPU_CLK_UNHALTED.THREAD",
@@ -19,7 +17,6 @@
     },
     {
         "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 1",
         "UMask": "0x2",
         "AnyThread": "1",
@@ -29,7 +26,6 @@
         "CounterHTOff": "Fixed counter 1"
     },
     {
-        "EventCode": "0x00",
         "Counter": "Fixed counter 2",
         "UMask": "0x3",
         "EventName": "CPU_CLK_UNHALTED.REF_TSC",
index 7c2679514efb40559aea735843b099c3d6eec2d5..f3874b5f99953216deadbd7ff0304eb14605c3bc 100644 (file)
 [
     {
-        "BriefDescription": "Instructions Per Cycle (per logical thread)",
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Frontend_Bound"
+    },
+    {
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Frontend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Bad_Speculation"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Bad_Speculation_SMT"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Backend_Bound"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Backend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Retiring"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Retiring_SMT"
+    },
+    {
         "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Instructions Per Cycle (per logical thread)",
         "MetricGroup": "TopDownL1",
         "MetricName": "IPC"
     },
     {
-        "BriefDescription": "Uops Per Instruction",
         "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
-        "MetricGroup": "Pipeline",
+        "BriefDescription": "Uops Per Instruction",
+        "MetricGroup": "Pipeline;Retiring",
         "MetricName": "UPI"
     },
     {
-        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
-        "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
-        "MetricGroup": "Frontend",
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+        "BriefDescription": "Instruction per taken branch",
+        "MetricGroup": "Branches;PGO",
+        "MetricName": "IpTB"
+    },
+    {
+        "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+        "BriefDescription": "Branch instructions per taken branch. ",
+        "MetricGroup": "Branches;PGO",
+        "MetricName": "BpTB"
+    },
+    {
+        "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
+        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+        "MetricGroup": "PGO",
         "MetricName": "IFetch_Line_Utilization"
     },
     {
-        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
-        "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
-        "MetricGroup": "DSB; Frontend_Bandwidth",
+        "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+        "MetricGroup": "DSB;Frontend_Bandwidth",
         "MetricName": "DSB_Coverage"
     },
     {
-        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricGroup": "Pipeline;Summary",
         "MetricName": "CPI"
     },
     {
-        "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
         "MetricGroup": "Summary",
         "MetricName": "CLKS"
     },
     {
-        "BriefDescription": "Total issue-pipeline slots",
-        "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+        "MetricExpr": "4 * cycles",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
         "MetricGroup": "TopDownL1",
         "MetricName": "SLOTS"
     },
     {
-        "BriefDescription": "Total number of retired Instructions",
+        "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
+        "MetricGroup": "TopDownL1_SMT",
+        "MetricName": "SLOTS_SMT"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+        "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+        "MetricGroup": "Instruction_Type;L1_Bound",
+        "MetricName": "IpL"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+        "BriefDescription": "Instructions per Store",
+        "MetricGroup": "Instruction_Type;Store_Bound",
+        "MetricName": "IpS"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Instructions per Branch",
+        "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+        "MetricName": "IpB"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+        "BriefDescription": "Instruction per (near) call",
+        "MetricGroup": "Branches",
+        "MetricName": "IpCall"
+    },
+    {
         "MetricExpr": "INST_RETIRED.ANY",
+        "BriefDescription": "Total number of retired Instructions",
         "MetricGroup": "Summary",
         "MetricName": "Instructions"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / cycles",
         "BriefDescription": "Instructions Per Cycle (per physical core)",
-        "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
         "MetricGroup": "SMT",
         "MetricName": "CoreIPC"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Instructions Per Cycle (per physical core)",
+        "MetricGroup": "SMT",
+        "MetricName": "CoreIPC_SMT"
+    },
+    {
+        "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
+        "BriefDescription": "Floating Point Operations Per Cycle",
+        "MetricGroup": "FLOPS",
+        "MetricName": "FLOPc"
+    },
+    {
+        "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Floating Point Operations Per Cycle",
+        "MetricGroup": "FLOPS_SMT",
+        "MetricName": "FLOPc_SMT"
+    },
+    {
+        "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
         "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
-        "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
         "MetricGroup": "Pipeline;Ports_Utilization",
         "MetricName": "ILP"
     },
     {
-        "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
-        "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFETCH_STALL ) / RS_EVENTS.EMPTY_END)",
-        "MetricGroup": "Unknown_Branches",
-        "MetricName": "BAClear_Cost"
+        "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+        "MetricGroup": "Branch_Mispredicts",
+        "MetricName": "IpMispredict"
     },
     {
+        "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
         "BriefDescription": "Core actual clocks when any thread is active on the physical core",
-        "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
         "MetricGroup": "SMT",
         "MetricName": "CORE_CLKS"
     },
     {
-        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
         "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
         "MetricGroup": "Memory_Bound;Memory_Lat",
         "MetricName": "Load_Miss_Real_Latency"
     },
     {
-        "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
-        "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+        "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+        "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
         "MetricGroup": "Memory_Bound;Memory_BW",
         "MetricName": "MLP"
     },
     {
+        "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
         "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
-        "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
         "MetricGroup": "TLB",
         "MetricName": "Page_Walks_Utilization"
     },
     {
-        "BriefDescription": "Average CPU Utilization",
+        "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+        "MetricGroup": "TLB_SMT",
+        "MetricName": "Page_Walks_Utilization_SMT"
+    },
+    {
+        "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+        "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L1D_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+        "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L2_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+        "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L3_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L1MPKI"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2MPKI"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2MPKI_All"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2HPKI_All"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L3MPKI"
+    },
+    {
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+        "BriefDescription": "Average CPU Utilization",
         "MetricGroup": "Summary",
         "MetricName": "CPU_Utilization"
     },
     {
+        "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
         "BriefDescription": "Giga Floating Point Operations Per Second",
-        "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
         "MetricGroup": "FLOPS;Summary",
         "MetricName": "GFLOPs"
     },
     {
-        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricGroup": "Power",
         "MetricName": "Turbo_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricGroup": "SMT;Summary",
         "MetricName": "SMT_2T_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
     {
-        "BriefDescription": "C3 residency percent per core",
+        "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+        "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "DRAM_BW_Use"
+    },
+    {
+        "MetricExpr": "cbox_0@event\\=0x0@",
+        "BriefDescription": "Socket actual clocks when any core is active on that socket",
+        "MetricGroup": "",
+        "MetricName": "Socket_CLKS"
+    },
+    {
         "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per core",
         "MetricName": "C3_Core_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per core",
         "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per core",
         "MetricName": "C6_Core_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per core",
         "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per core",
         "MetricName": "C7_Core_Residency"
     },
     {
-        "BriefDescription": "C2 residency percent per package",
         "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C2 residency percent per package",
         "MetricName": "C2_Pkg_Residency"
     },
     {
-        "BriefDescription": "C3 residency percent per package",
         "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per package",
         "MetricName": "C3_Pkg_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per package",
         "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per package",
         "MetricName": "C6_Pkg_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per package",
         "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per package",
         "MetricName": "C7_Pkg_Residency"
     }
 ]
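
(The DRAM_BW_Use expression above follows from each CAS command transferring one 64-byte cache line: total bytes are 64 * (read CAS + write CAS), scaled to GB and divided by wall-clock time. A worked sketch with hypothetical counts:)

    # Worked example of the DRAM_BW_Use MetricExpr above: each DRAM CAS
    # command moves one 64-byte cache line, so bandwidth in GB/s is
    # 64 * (reads + writes) / 1e9 / seconds. Values are hypothetical.
    cas_count_read = 500_000_000     # uncore_imc@cas_count_read@
    cas_count_write = 125_000_000    # uncore_imc@cas_count_write@
    duration_time = 2.0              # wall-clock seconds of the measurement

    dram_bw_gbs = 64 * (cas_count_read + cas_count_write) / 1e9 / duration_time
    print(f"{dram_bw_gbs:.1f} GB/s")  # 20.0 GB/s
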
index 0afbfd95ea306de86e4791359899d33171fb5ad0..2a0aad91d83d05e320e3cb57bfbd6e77614dc031 100644 (file)
@@ -1,6 +1,5 @@
 [
     {
-        "EventCode": "0x00",
         "Counter": "Fixed counter 0",
         "UMask": "0x1",
         "EventName": "INST_RETIRED.ANY",
@@ -9,7 +8,6 @@
         "CounterHTOff": "Fixed counter 0"
     },
     {
-        "EventCode": "0x00",
         "Counter": "Fixed counter 1",
         "UMask": "0x2",
         "EventName": "CPU_CLK_UNHALTED.THREAD",
@@ -19,7 +17,6 @@
     },
     {
         "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 1",
         "UMask": "0x2",
         "AnyThread": "1",
@@ -29,7 +26,6 @@
         "CounterHTOff": "Fixed counter 1"
     },
     {
-        "EventCode": "0x00",
         "Counter": "Fixed counter 2",
         "UMask": "0x3",
         "EventName": "CPU_CLK_UNHALTED.REF_TSC",
index ee22e4a5e30d2c19d183dc1f9f9270113d25e24d..52dc6ef40e635c123e7620d1edbe948392ee39d2 100644 (file)
@@ -31,7 +31,7 @@
     },
     {
         "PEBS": "1",
-        "PublicDescription": "This event counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+        "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
         "EventCode": "0xD0",
         "Counter": "0,1,2,3",
         "UMask": "0x41",
@@ -42,7 +42,7 @@
     },
     {
         "PEBS": "1",
-        "PublicDescription": "This event counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+        "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
         "EventCode": "0xD0",
         "Counter": "0,1,2,3",
         "UMask": "0x42",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "This event counts L1D data line replacements.  Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier.  ",
+        "PublicDescription": "This event counts L1D data line replacements.  Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier.",
         "EventCode": "0x51",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
index fd7d7c438226b9d5cbdc1878e1306cc8fddc68b8..98c73e430b05b56567a0310057ecd7b2b04a10bb 100644 (file)
 [
     {
-        "BriefDescription": "Instructions Per Cycle (per logical thread)",
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Frontend_Bound"
+    },
+    {
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Frontend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Bad_Speculation"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Bad_Speculation_SMT"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Backend_Bound"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Backend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Retiring"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Retiring_SMT"
+    },
+    {
         "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Instructions Per Cycle (per logical thread)",
         "MetricGroup": "TopDownL1",
         "MetricName": "IPC"
     },
     {
-        "BriefDescription": "Uops Per Instruction",
         "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
-        "MetricGroup": "Pipeline",
+        "BriefDescription": "Uops Per Instruction",
+        "MetricGroup": "Pipeline;Retiring",
         "MetricName": "UPI"
     },
     {
-        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
-        "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
-        "MetricGroup": "Frontend",
+        "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
+        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+        "MetricGroup": "PGO",
         "MetricName": "IFetch_Line_Utilization"
     },
     {
-        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
-        "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
-        "MetricGroup": "DSB; Frontend_Bandwidth",
+        "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+        "MetricGroup": "DSB;Frontend_Bandwidth",
         "MetricName": "DSB_Coverage"
     },
     {
-        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricGroup": "Pipeline;Summary",
         "MetricName": "CPI"
     },
     {
-        "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
         "MetricGroup": "Summary",
         "MetricName": "CLKS"
     },
     {
-        "BriefDescription": "Total issue-pipeline slots",
-        "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+        "MetricExpr": "4 * cycles",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
         "MetricGroup": "TopDownL1",
         "MetricName": "SLOTS"
     },
     {
-        "BriefDescription": "Total number of retired Instructions",
+        "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
+        "MetricGroup": "TopDownL1_SMT",
+        "MetricName": "SLOTS_SMT"
+    },
+    {
         "MetricExpr": "INST_RETIRED.ANY",
+        "BriefDescription": "Total number of retired Instructions",
         "MetricGroup": "Summary",
         "MetricName": "Instructions"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / cycles",
         "BriefDescription": "Instructions Per Cycle (per physical core)",
-        "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
         "MetricGroup": "SMT",
         "MetricName": "CoreIPC"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Instructions Per Cycle (per physical core)",
+        "MetricGroup": "SMT",
+        "MetricName": "CoreIPC_SMT"
+    },
+    {
+        "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
+        "BriefDescription": "Floating Point Operations Per Cycle",
+        "MetricGroup": "FLOPS",
+        "MetricName": "FLOPc"
+    },
+    {
+        "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Floating Point Operations Per Cycle",
+        "MetricGroup": "FLOPS_SMT",
+        "MetricName": "FLOPc_SMT"
+    },
+    {
+        "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
         "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
-        "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
         "MetricGroup": "Pipeline;Ports_Utilization",
         "MetricName": "ILP"
     },
     {
+        "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
         "BriefDescription": "Core actual clocks when any thread is active on the physical core",
-        "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
         "MetricGroup": "SMT",
         "MetricName": "CORE_CLKS"
     },
     {
-        "BriefDescription": "Average CPU Utilization",
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+        "BriefDescription": "Average CPU Utilization",
         "MetricGroup": "Summary",
         "MetricName": "CPU_Utilization"
     },
     {
+        "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
         "BriefDescription": "Giga Floating Point Operations Per Second",
-        "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
         "MetricGroup": "FLOPS;Summary",
         "MetricName": "GFLOPs"
     },
     {
-        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricGroup": "Power",
         "MetricName": "Turbo_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricGroup": "SMT;Summary",
         "MetricName": "SMT_2T_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
     {
-        "BriefDescription": "C3 residency percent per core",
+        "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+        "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "DRAM_BW_Use"
+    },
+    {
+        "MetricExpr": "cbox_0@event\\=0x0@",
+        "BriefDescription": "Socket actual clocks when any core is active on that socket",
+        "MetricGroup": "",
+        "MetricName": "Socket_CLKS"
+    },
+    {
         "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per core",
         "MetricName": "C3_Core_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per core",
         "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per core",
         "MetricName": "C6_Core_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per core",
         "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per core",
         "MetricName": "C7_Core_Residency"
     },
     {
-        "BriefDescription": "C2 residency percent per package",
         "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C2 residency percent per package",
         "MetricName": "C2_Pkg_Residency"
     },
     {
-        "BriefDescription": "C3 residency percent per package",
         "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per package",
         "MetricName": "C3_Pkg_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per package",
         "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per package",
         "MetricName": "C6_Pkg_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per package",
         "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per package",
         "MetricName": "C7_Pkg_Residency"
     }
 ]
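
The four TopdownL1 metrics introduced above (Frontend_Bound, Bad_Speculation, Retiring, Backend_Bound) partition the machine's issue slots, where SLOTS = 4 * cycles in the non-SMT form and Backend_Bound is defined as the remainder, so the four fractions sum to one by construction (here "cycles" stands for the per-thread unhalted cycle count, matching the CPI expression above). A worked evaluation of the non-SMT expressions with made-up counter values (the numbers are illustrative, not measurements):

    # Illustrative counter values plugged into the non-SMT TopdownL1
    # expressions defined above; SLOTS = 4 * cycles.
    cycles = 1_000_000
    idq_uops_not_delivered_core = 800_000
    uops_issued_any = 3_000_000
    uops_retired_retire_slots = 2_600_000
    int_misc_recovery_cycles = 20_000

    slots = 4 * cycles
    frontend_bound = idq_uops_not_delivered_core / slots                # 0.20
    bad_speculation = (uops_issued_any - uops_retired_retire_slots
                       + 4 * int_misc_recovery_cycles) / slots          # 0.12
    retiring = uops_retired_retire_slots / slots                        # 0.65
    backend_bound = 1 - (frontend_bound + bad_speculation + retiring)   # 0.03

    # The level-1 fractions partition all issue slots:
    total = frontend_bound + bad_speculation + retiring + backend_bound
    assert abs(total - 1.0) < 1e-9

In a perf build that picks up these files, the metrics should be selectable by MetricName, e.g. perf stat -M Frontend_Bound, though the exact invocation depends on the perf version.
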
index 34a519d9bfa045add6274c830b573ad3d2e4058d..783a5b4a67b19725ae4cb5600c84af141cf666aa 100644 (file)
@@ -1,7 +1,6 @@
 [
     {
-        "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. ",
-        "EventCode": "0x00",
+        "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers.",
         "Counter": "Fixed counter 1",
         "UMask": "0x1",
         "EventName": "INST_RETIRED.ANY",
@@ -10,8 +9,7 @@
         "CounterHTOff": "Fixed counter 1"
     },
     {
-        "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. ",
-        "EventCode": "0x00",
+        "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
         "Counter": "Fixed counter 2",
         "UMask": "0x2",
         "EventName": "CPU_CLK_UNHALTED.THREAD",
@@ -20,8 +18,7 @@
         "CounterHTOff": "Fixed counter 2"
     },
     {
-        "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. ",
-        "EventCode": "0x00",
+        "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
         "Counter": "Fixed counter 3",
         "UMask": "0x3",
         "EventName": "CPU_CLK_UNHALTED.REF_TSC",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load.  The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store.  See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual.  The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
+        "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load.  The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceeding smaller uncompleted store.  See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual.  The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
         "EventCode": "0x03",
         "Counter": "0,1,2,3",
         "UMask": "0x2",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x00",
         "Counter": "Fixed counter 2",
         "UMask": "0x2",
         "AnyThread": "1",
index e434ec723001897f2fb0030586df52d21eafd3b1..e847b0fd696df6cc46ee9ede94e9310d4b9608c6 100644 (file)
         "BriefDescription": "Counts the number of L2 cache misses"
     },
     {
-        "PublicDescription": "This event counts the number of core cycles the fetch stalls because of an icache miss. This is a cumulative count of cycles the NIP stalled for all icache misses. ",
+        "PublicDescription": "This event counts the number of core cycles the fetch stalls because of an icache miss. This is a cumulative count of cycles the NIP stalled for all icache misses.",
         "EventCode": "0x86",
         "Counter": "0,1",
         "UMask": "0x4",
         "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Counts the number of core cycles the fetch stalls because of an icache miss. This is a cummulative count of core cycles the fetch stalled for all icache misses. "
+        "BriefDescription": "Counts the number of core cycles the fetch stalls because of an icache miss. This is a cummulative count of core cycles the fetch stalled for all icache misses."
     },
     {
-        "PublicDescription": "This event counts the number of load micro-ops retired that miss in L1 Data cache. Note that prefetch misses will not be counted. ",
+        "PublicDescription": "This event counts the number of load micro-ops retired that miss in L1 Data cache. Note that prefetch misses will not be counted.",
         "EventCode": "0x04",
         "Counter": "0,1",
         "UMask": "0x1",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x4000000070 ",
+        "MSRValue": "0x4000000070",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.OUTSTANDING",
         "MSRIndex": "0x1a6",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any Prefetch requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+        "BriefDescription": "Counts any Prefetch requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000400070 ",
+        "MSRValue": "0x1000400070",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+        "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800400070 ",
+        "MSRValue": "0x0800400070",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE_E_F",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000080070 ",
+        "MSRValue": "0x1000080070",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+        "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800080070 ",
+        "MSRValue": "0x0800080070",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE_E_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+        "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0000010070 ",
+        "MSRValue": "0x0000010070",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.ANY_RESPONSE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x40000032f7 ",
+        "MSRValue": "0x40000032f7",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
         "MSRIndex": "0x1a6",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any Read request  that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+        "BriefDescription": "Counts any Read request  that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x10004032f7 ",
+        "MSRValue": "0x10004032f7",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any Read request  that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+        "BriefDescription": "Counts any Read request  that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x08004032f7 ",
+        "MSRValue": "0x08004032f7",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE_E_F",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x10000832f7 ",
+        "MSRValue": "0x10000832f7",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any Read request  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+        "BriefDescription": "Counts any Read request  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x08000832f7 ",
+        "MSRValue": "0x08000832f7",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE_E_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any Read request  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+        "BriefDescription": "Counts any Read request  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x00000132f7 ",
+        "MSRValue": "0x00000132f7",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x4000000044 ",
+        "MSRValue": "0x4000000044",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.OUTSTANDING",
         "MSRIndex": "0x1a6",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand code reads and prefetch code read requests  that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+        "BriefDescription": "Counts Demand code reads and prefetch code read requests  that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000400044 ",
+        "MSRValue": "0x1000400044",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand code reads and prefetch code read requests  that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+        "BriefDescription": "Counts Demand code reads and prefetch code read requests  that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800400044 ",
+        "MSRValue": "0x0800400044",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE_E_F",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000080044 ",
+        "MSRValue": "0x1000080044",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand code reads and prefetch code read requests  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+        "BriefDescription": "Counts Demand code reads and prefetch code read requests  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800080044 ",
+        "MSRValue": "0x0800080044",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE_E_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand code reads and prefetch code read requests  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+        "BriefDescription": "Counts Demand code reads and prefetch code read requests  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0000010044 ",
+        "MSRValue": "0x0000010044",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.ANY_RESPONSE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x4000000022 ",
+        "MSRValue": "0x4000000022",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
         "MSRIndex": "0x1a6",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data write requests  that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+        "BriefDescription": "Counts Demand cacheable data write requests  that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000400022 ",
+        "MSRValue": "0x1000400022",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data write requests  that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+        "BriefDescription": "Counts Demand cacheable data write requests  that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800400022 ",
+        "MSRValue": "0x0800400022",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE_E_F",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000080022 ",
+        "MSRValue": "0x1000080022",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data write requests  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+        "BriefDescription": "Counts Demand cacheable data write requests  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800080022 ",
+        "MSRValue": "0x0800080022",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE_E_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data write requests  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+        "BriefDescription": "Counts Demand cacheable data write requests  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0000010022 ",
+        "MSRValue": "0x0000010022",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x4000003091 ",
+        "MSRValue": "0x4000003091",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
         "MSRIndex": "0x1a6",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests  that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+        "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests  that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000403091 ",
+        "MSRValue": "0x1000403091",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests  that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+        "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests  that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800403091 ",
+        "MSRValue": "0x0800403091",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE_E_F",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000083091 ",
+        "MSRValue": "0x1000083091",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+        "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800083091 ",
+        "MSRValue": "0x0800083091",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE_E_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+        "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0000013091 ",
+        "MSRValue": "0x0000013091",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x4000008000 ",
+        "MSRValue": "0x4000008000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
         "MSRIndex": "0x1a6",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+        "BriefDescription": "Counts any request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000408000 ",
+        "MSRValue": "0x1000408000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+        "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800408000 ",
+        "MSRValue": "0x0800408000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE_E_F",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000088000 ",
+        "MSRValue": "0x1000088000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+        "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800088000 ",
+        "MSRValue": "0x0800088000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE_E_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+        "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0000018000 ",
+        "MSRValue": "0x0000018000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0000014800 ",
+        "MSRValue": "0x0000014800",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0000014000 ",
+        "MSRValue": "0x0000014000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.ANY_RESPONSE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x4000002000 ",
+        "MSRValue": "0x4000002000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
         "MSRIndex": "0x1a6",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L1 data HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+        "BriefDescription": "Counts L1 data HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000402000 ",
+        "MSRValue": "0x1000402000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+        "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800402000 ",
+        "MSRValue": "0x0800402000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE_E_F",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000082000 ",
+        "MSRValue": "0x1000082000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+        "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800082000 ",
+        "MSRValue": "0x0800082000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE_E_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+        "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0000012000 ",
+        "MSRValue": "0x0000012000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x4000001000 ",
+        "MSRValue": "0x4000001000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.OUTSTANDING",
         "MSRIndex": "0x1a6",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Software Prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+        "BriefDescription": "Counts Software Prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000401000 ",
+        "MSRValue": "0x1000401000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+        "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800401000 ",
+        "MSRValue": "0x0800401000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE_E_F",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000081000 ",
+        "MSRValue": "0x1000081000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+        "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800081000 ",
+        "MSRValue": "0x0800081000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE_E_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+        "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0000011000 ",
+        "MSRValue": "0x0000011000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.ANY_RESPONSE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0000010800 ",
+        "MSRValue": "0x0000010800",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x4000000400 ",
+        "MSRValue": "0x4000000400",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
         "MSRIndex": "0x1a6",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Bus locks and split lock requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+        "BriefDescription": "Counts Bus locks and split lock requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000400400 ",
+        "MSRValue": "0x1000400400",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+        "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800400400 ",
+        "MSRValue": "0x0800400400",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE_E_F",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000080400 ",
+        "MSRValue": "0x1000080400",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+        "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800080400 ",
+        "MSRValue": "0x0800080400",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE_E_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+        "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0000010400 ",
+        "MSRValue": "0x0000010400",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x4000000200 ",
+        "MSRValue": "0x4000000200",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.OUTSTANDING",
         "MSRIndex": "0x1a6",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts UC code reads (valid only for Outstanding response type)  that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+        "BriefDescription": "Counts UC code reads (valid only for Outstanding response type)  that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000400200 ",
+        "MSRValue": "0x1000400200",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_FAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts UC code reads (valid only for Outstanding response type)  that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+        "BriefDescription": "Counts UC code reads (valid only for Outstanding response type)  that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800400200 ",
+        "MSRValue": "0x0800400200",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_FAR_TILE_E_F",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000080200 ",
+        "MSRValue": "0x1000080200",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts UC code reads (valid only for Outstanding response type)  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+        "BriefDescription": "Counts UC code reads (valid only for Outstanding response type)  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800080200 ",
+        "MSRValue": "0x0800080200",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE_E_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts UC code reads (valid only for Outstanding response type)  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+        "BriefDescription": "Counts UC code reads (valid only for Outstanding response type)  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0000010200 ",
+        "MSRValue": "0x0000010200",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.ANY_RESPONSE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000400100 ",
+        "MSRValue": "0x1000400100",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE_M",
         "MSRIndex": "0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+        "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800400100 ",
+        "MSRValue": "0x0800400100",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE_E_F",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000080100 ",
+        "MSRValue": "0x1000080100",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE_M",
         "MSRIndex": "0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+        "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800080100 ",
+        "MSRValue": "0x0800080100",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE_E_F",
         "MSRIndex": "0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+        "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0000010100 ",
+        "MSRValue": "0x0000010100",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.ANY_RESPONSE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x4000000080 ",
+        "MSRValue": "0x4000000080",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.OUTSTANDING",
         "MSRIndex": "0x1a6",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type).  that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+        "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type).  that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000400080 ",
+        "MSRValue": "0x1000400080",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type).  that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+        "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type).  that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800400080 ",
+        "MSRValue": "0x0800400080",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE_E_F",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000080080 ",
+        "MSRValue": "0x1000080080",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type).  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+        "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type).  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800080080 ",
+        "MSRValue": "0x0800080080",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE_E_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type).  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+        "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type).  that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0000010080 ",
+        "MSRValue": "0x0000010080",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.ANY_RESPONSE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x4000000040 ",
+        "MSRValue": "0x4000000040",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.OUTSTANDING",
         "MSRIndex": "0x1a6",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L2 code HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+        "BriefDescription": "Counts L2 code HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000400040 ",
+        "MSRValue": "0x1000400040",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+        "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800400040 ",
+        "MSRValue": "0x0800400040",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE_E_F",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000080040 ",
+        "MSRValue": "0x1000080040",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+        "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800080040 ",
+        "MSRValue": "0x0800080040",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE_E_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+        "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0000010040 ",
+        "MSRValue": "0x0000010040",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.ANY_RESPONSE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000400020 ",
+        "MSRValue": "0x1000400020",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_FAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+        "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800400020 ",
+        "MSRValue": "0x0800400020",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_FAR_TILE_E_F",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000080020 ",
+        "MSRValue": "0x1000080020",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+        "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800080020 ",
+        "MSRValue": "0x0800080020",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE_E_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+        "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0000020020 ",
+        "MSRValue": "0x0000020020",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0000010020 ",
+        "MSRValue": "0x0000010020",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x4000000004 ",
+        "MSRValue": "0x4000000004",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
         "MSRIndex": "0x1a6",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand code reads and prefetch code reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+        "BriefDescription": "Counts demand code reads and prefetch code reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000400004 ",
+        "MSRValue": "0x1000400004",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+        "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800400004 ",
+        "MSRValue": "0x0800400004",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE_E_F",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000080004 ",
+        "MSRValue": "0x1000080004",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+        "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800080004 ",
+        "MSRValue": "0x0800080004",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE_E_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+        "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0000010004 ",
+        "MSRValue": "0x0000010004",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x4000000002 ",
+        "MSRValue": "0x4000000002",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
         "MSRIndex": "0x1a6",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data writes that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+        "BriefDescription": "Counts Demand cacheable data writes that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000400002 ",
+        "MSRValue": "0x1000400002",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+        "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800400002 ",
+        "MSRValue": "0x0800400002",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE_E_F",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000080002 ",
+        "MSRValue": "0x1000080002",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+        "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800080002 ",
+        "MSRValue": "0x0800080002",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE_E_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+        "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0000010002 ",
+        "MSRValue": "0x0000010002",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x4000000001 ",
+        "MSRValue": "0x4000000001",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
         "MSRIndex": "0x1a6",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+        "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000400001 ",
+        "MSRValue": "0x1000400001",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_FAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+        "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800400001 ",
+        "MSRValue": "0x0800400001",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_FAR_TILE_E_F",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1000080001 ",
+        "MSRValue": "0x1000080001",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_NEAR_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+        "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0800080001 ",
+        "MSRValue": "0x0800080001",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_NEAR_TILE_E_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+        "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0000010001 ",
+        "MSRValue": "0x0000010001",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0002000001 ",
+        "MSRValue": "0x0002000001",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in M state ",
+        "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in M state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0002000002 ",
+        "MSRValue": "0x0002000002",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in M state ",
+        "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in M state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0002000004 ",
+        "MSRValue": "0x0002000004",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in M state ",
+        "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in M state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0002000020 ",
+        "MSRValue": "0x0002000020",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in M state ",
+        "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in M state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0002000080 ",
+        "MSRValue": "0x0002000080",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type).  that accounts for responses which hit its own tile's L2 with data in M state ",
+        "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type).  that accounts for responses which hit its own tile's L2 with data in M state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0002000100 ",
+        "MSRValue": "0x0002000100",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_M",
         "MSRIndex": "0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in M state ",
+        "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in M state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0002000200 ",
+        "MSRValue": "0x0002000200",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts UC code reads (valid only for Outstanding response type)  that accounts for responses which hit its own tile's L2 with data in M state ",
+        "BriefDescription": "Counts UC code reads (valid only for Outstanding response type)  that accounts for responses which hit its own tile's L2 with data in M state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0002000400 ",
+        "MSRValue": "0x0002000400",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in M state ",
+        "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in M state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0002001000 ",
+        "MSRValue": "0x0002001000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in M state ",
+        "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in M state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0002002000 ",
+        "MSRValue": "0x0002002000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in M state ",
+        "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in M state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0002008000 ",
+        "MSRValue": "0x0002008000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in M state ",
+        "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in M state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0002003091 ",
+        "MSRValue": "0x0002003091",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests  that accounts for responses which hit its own tile's L2 with data in M state ",
+        "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests  that accounts for responses which hit its own tile's L2 with data in M state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0002000022 ",
+        "MSRValue": "0x0002000022",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data write requests  that accounts for responses which hit its own tile's L2 with data in M state ",
+        "BriefDescription": "Counts Demand cacheable data write requests  that accounts for responses which hit its own tile's L2 with data in M state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0002000044 ",
+        "MSRValue": "0x0002000044",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand code reads and prefetch code read requests  that accounts for responses which hit its own tile's L2 with data in M state ",
+        "BriefDescription": "Counts Demand code reads and prefetch code read requests  that accounts for responses which hit its own tile's L2 with data in M state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x00020032f7 ",
+        "MSRValue": "0x00020032f7",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any Read request  that accounts for responses which hit its own tile's L2 with data in M state ",
+        "BriefDescription": "Counts any Read request  that accounts for responses which hit its own tile's L2 with data in M state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0002000070 ",
+        "MSRValue": "0x0002000070",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_M",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in M state ",
+        "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in M state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0004000001 ",
+        "MSRValue": "0x0004000001",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_E",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in E state ",
+        "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in E state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0004000002 ",
+        "MSRValue": "0x0004000002",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_E",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in E state ",
+        "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in E state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0004000004 ",
+        "MSRValue": "0x0004000004",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_E",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in E state ",
+        "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in E state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0004000020 ",
+        "MSRValue": "0x0004000020",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_E",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in E state ",
+        "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in E state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0004000040 ",
+        "MSRValue": "0x0004000040",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_THIS_TILE_E",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in E state ",
+        "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in E state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0004000080 ",
+        "MSRValue": "0x0004000080",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_E",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type).  that accounts for responses which hit its own tile's L2 with data in E state ",
+        "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type).  that accounts for responses which hit its own tile's L2 with data in E state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0004000100 ",
+        "MSRValue": "0x0004000100",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_E",
         "MSRIndex": "0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in E state ",
+        "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in E state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0004000200 ",
+        "MSRValue": "0x0004000200",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_E",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts UC code reads (valid only for Outstanding response type)  that accounts for responses which hit its own tile's L2 with data in E state ",
+        "BriefDescription": "Counts UC code reads (valid only for Outstanding response type)  that accounts for responses which hit its own tile's L2 with data in E state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0004000400 ",
+        "MSRValue": "0x0004000400",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_E",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in E state ",
+        "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in E state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0004001000 ",
+        "MSRValue": "0x0004001000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_E",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in E state ",
+        "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in E state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0004002000 ",
+        "MSRValue": "0x0004002000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_E",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in E state ",
+        "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in E state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0004008000 ",
+        "MSRValue": "0x0004008000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_E",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in E state ",
+        "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in E state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0004003091 ",
+        "MSRValue": "0x0004003091",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_E",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests  that accounts for responses which hit its own tile's L2 with data in E state ",
+        "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests  that accounts for responses which hit its own tile's L2 with data in E state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0004000022 ",
+        "MSRValue": "0x0004000022",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_E",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data write requests  that accounts for responses which hit its own tile's L2 with data in E state ",
+        "BriefDescription": "Counts Demand cacheable data write requests  that accounts for responses which hit its own tile's L2 with data in E state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0004000044 ",
+        "MSRValue": "0x0004000044",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_E",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand code reads and prefetch code read requests  that accounts for responses which hit its own tile's L2 with data in E state ",
+        "BriefDescription": "Counts Demand code reads and prefetch code read requests  that accounts for responses which hit its own tile's L2 with data in E state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x00040032f7 ",
+        "MSRValue": "0x00040032f7",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_E",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any Read request  that accounts for responses which hit its own tile's L2 with data in E state ",
+        "BriefDescription": "Counts any Read request  that accounts for responses which hit its own tile's L2 with data in E state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0004000070 ",
+        "MSRValue": "0x0004000070",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_E",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in E state ",
+        "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in E state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0008000001 ",
+        "MSRValue": "0x0008000001",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_S",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in S state ",
+        "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in S state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0008000002 ",
+        "MSRValue": "0x0008000002",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_S",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in S state ",
+        "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in S state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0008000004 ",
+        "MSRValue": "0x0008000004",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_S",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in S state ",
+        "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in S state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0008000020 ",
+        "MSRValue": "0x0008000020",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_S",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in S state ",
+        "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in S state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0008000080 ",
+        "MSRValue": "0x0008000080",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_S",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type).  that accounts for responses which hit its own tile's L2 with data in S state ",
+        "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type).  that accounts for responses which hit its own tile's L2 with data in S state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0008000100 ",
+        "MSRValue": "0x0008000100",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_S",
         "MSRIndex": "0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in S state ",
+        "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in S state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0008000200 ",
+        "MSRValue": "0x0008000200",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_S",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts UC code reads (valid only for Outstanding response type)  that accounts for responses which hit its own tile's L2 with data in S state ",
+        "BriefDescription": "Counts UC code reads (valid only for Outstanding response type)  that accounts for responses which hit its own tile's L2 with data in S state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0008000400 ",
+        "MSRValue": "0x0008000400",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_S",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in S state ",
+        "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in S state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0008001000 ",
+        "MSRValue": "0x0008001000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_S",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in S state ",
+        "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in S state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0008002000 ",
+        "MSRValue": "0x0008002000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_S",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in S state ",
+        "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in S state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0008008000 ",
+        "MSRValue": "0x0008008000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_S",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in S state ",
+        "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in S state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0008003091 ",
+        "MSRValue": "0x0008003091",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_S",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests  that accounts for responses which hit its own tile's L2 with data in S state ",
+        "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests  that accounts for responses which hit its own tile's L2 with data in S state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0008000022 ",
+        "MSRValue": "0x0008000022",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_S",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data write requests  that accounts for responses which hit its own tile's L2 with data in S state ",
+        "BriefDescription": "Counts Demand cacheable data write requests  that accounts for responses which hit its own tile's L2 with data in S state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0008000044 ",
+        "MSRValue": "0x0008000044",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_S",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand code reads and prefetch code read requests  that accounts for responses which hit its own tile's L2 with data in S state ",
+        "BriefDescription": "Counts Demand code reads and prefetch code read requests  that accounts for responses which hit its own tile's L2 with data in S state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x00080032f7 ",
+        "MSRValue": "0x00080032f7",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_S",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any Read request  that accounts for responses which hit its own tile's L2 with data in S state ",
+        "BriefDescription": "Counts any Read request  that accounts for responses which hit its own tile's L2 with data in S state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0010000001 ",
+        "MSRValue": "0x0010000001",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in F state ",
+        "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in F state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0010000002 ",
+        "MSRValue": "0x0010000002",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in F state ",
+        "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in F state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0010000004 ",
+        "MSRValue": "0x0010000004",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in F state ",
+        "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in F state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0010000020 ",
+        "MSRValue": "0x0010000020",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in F state ",
+        "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in F state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0010000040 ",
+        "MSRValue": "0x0010000040",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_THIS_TILE_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in F state ",
+        "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in F state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0010000080 ",
+        "MSRValue": "0x0010000080",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type).  that accounts for responses which hit its own tile's L2 with data in F state ",
+        "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type).  that accounts for responses which hit its own tile's L2 with data in F state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0010000100 ",
+        "MSRValue": "0x0010000100",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_F",
         "MSRIndex": "0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in F state ",
+        "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in F state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0010000200 ",
+        "MSRValue": "0x0010000200",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts UC code reads (valid only for Outstanding response type)  that accounts for responses which hit its own tile's L2 with data in F state ",
+        "BriefDescription": "Counts UC code reads (valid only for Outstanding response type)  that accounts for responses which hit its own tile's L2 with data in F state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0010000400 ",
+        "MSRValue": "0x0010000400",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in F state ",
+        "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in F state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0010001000 ",
+        "MSRValue": "0x0010001000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in F state ",
+        "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in F state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0010002000 ",
+        "MSRValue": "0x0010002000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in F state ",
+        "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in F state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0010008000 ",
+        "MSRValue": "0x0010008000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in F state ",
+        "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in F state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0010003091 ",
+        "MSRValue": "0x0010003091",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests  that accounts for responses which hit its own tile's L2 with data in F state ",
+        "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests  that accounts for responses which hit its own tile's L2 with data in F state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0010000022 ",
+        "MSRValue": "0x0010000022",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data write requests  that accounts for responses which hit its own tile's L2 with data in F state ",
+        "BriefDescription": "Counts Demand cacheable data write requests  that accounts for responses which hit its own tile's L2 with data in F state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0010000044 ",
+        "MSRValue": "0x0010000044",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand code reads and prefetch code read requests  that accounts for responses which hit its own tile's L2 with data in F state ",
+        "BriefDescription": "Counts Demand code reads and prefetch code read requests  that accounts for responses which hit its own tile's L2 with data in F state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x00100032f7 ",
+        "MSRValue": "0x00100032f7",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any Read request  that accounts for responses which hit its own tile's L2 with data in F state ",
+        "BriefDescription": "Counts any Read request  that accounts for responses which hit its own tile's L2 with data in F state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0010000070 ",
+        "MSRValue": "0x0010000070",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_F",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in F state ",
+        "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in F state",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800180002 ",
+        "MSRValue": "0x1800180002",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800180004 ",
+        "MSRValue": "0x1800180004",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800180020 ",
+        "MSRValue": "0x1800180020",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800180040 ",
+        "MSRValue": "0x1800180040",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800180080 ",
+        "MSRValue": "0x1800180080",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800180100 ",
+        "MSRValue": "0x1800180100",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800180200 ",
+        "MSRValue": "0x1800180200",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800180400 ",
+        "MSRValue": "0x1800180400",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800181000 ",
+        "MSRValue": "0x1800181000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800182000 ",
+        "MSRValue": "0x1800182000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800188000 ",
+        "MSRValue": "0x1800188000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800183091 ",
+        "MSRValue": "0x1800183091",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800180022 ",
+        "MSRValue": "0x1800180022",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800180044 ",
+        "MSRValue": "0x1800180044",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x18001832f7 ",
+        "MSRValue": "0x18001832f7",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800180070 ",
+        "MSRValue": "0x1800180070",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800400002 ",
+        "MSRValue": "0x1800400002",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800400004 ",
+        "MSRValue": "0x1800400004",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800400040 ",
+        "MSRValue": "0x1800400040",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800400080 ",
+        "MSRValue": "0x1800400080",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800400100 ",
+        "MSRValue": "0x1800400100",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800400400 ",
+        "MSRValue": "0x1800400400",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800401000 ",
+        "MSRValue": "0x1800401000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800402000 ",
+        "MSRValue": "0x1800402000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800408000 ",
+        "MSRValue": "0x1800408000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800403091 ",
+        "MSRValue": "0x1800403091",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800400022 ",
+        "MSRValue": "0x1800400022",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800400044 ",
+        "MSRValue": "0x1800400044",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x18004032f7 ",
+        "MSRValue": "0x18004032f7",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x1800400070 ",
+        "MSRValue": "0x1800400070",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE",
index 7006525662007812937c49e8724a22e72fa8b406..c6bb16ba0f8653aa36b5abd58a0e9ec1d3152869 100644
@@ -9,18 +9,18 @@
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0100400070 ",
+        "MSRValue": "0x0100400070",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any Prefetch requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+        "BriefDescription": "Counts any Prefetch requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080200070 ",
+        "MSRValue": "0x0080200070",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0101000070 ",
+        "MSRValue": "0x0101000070",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.DDR_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any Prefetch requests that accounts for data responses from DRAM Far. ",
+        "BriefDescription": "Counts any Prefetch requests that accounts for data responses from DRAM Far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080800070 ",
+        "MSRValue": "0x0080800070",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.DDR_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x01004032f7 ",
+        "MSRValue": "0x01004032f7",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any Read request  that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+        "BriefDescription": "Counts any Read request  that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x00802032f7 ",
+        "MSRValue": "0x00802032f7",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x01010032f7 ",
+        "MSRValue": "0x01010032f7",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.DDR_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any Read request  that accounts for data responses from DRAM Far. ",
+        "BriefDescription": "Counts any Read request  that accounts for data responses from DRAM Far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x00808032f7 ",
+        "MSRValue": "0x00808032f7",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.DDR_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0100400044 ",
+        "MSRValue": "0x0100400044",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand code reads and prefetch code read requests  that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+        "BriefDescription": "Counts Demand code reads and prefetch code read requests  that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080200044 ",
+        "MSRValue": "0x0080200044",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0101000044 ",
+        "MSRValue": "0x0101000044",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand code reads and prefetch code read requests  that accounts for data responses from DRAM Far. ",
+        "BriefDescription": "Counts Demand code reads and prefetch code read requests  that accounts for data responses from DRAM Far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080800044 ",
+        "MSRValue": "0x0080800044",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0100400022 ",
+        "MSRValue": "0x0100400022",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data write requests  that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+        "BriefDescription": "Counts Demand cacheable data write requests  that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080200022 ",
+        "MSRValue": "0x0080200022",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0101000022 ",
+        "MSRValue": "0x0101000022",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data write requests  that accounts for data responses from DRAM Far. ",
+        "BriefDescription": "Counts Demand cacheable data write requests  that accounts for data responses from DRAM Far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080800022 ",
+        "MSRValue": "0x0080800022",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0100403091 ",
+        "MSRValue": "0x0100403091",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests  that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+        "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests  that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080203091 ",
+        "MSRValue": "0x0080203091",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0101003091 ",
+        "MSRValue": "0x0101003091",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests  that accounts for data responses from DRAM Far. ",
+        "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests  that accounts for data responses from DRAM Far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080803091 ",
+        "MSRValue": "0x0080803091",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0100408000 ",
+        "MSRValue": "0x0100408000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any request that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+        "BriefDescription": "Counts any request that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080208000 ",
+        "MSRValue": "0x0080208000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0101008000 ",
+        "MSRValue": "0x0101008000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts any request that accounts for data responses from DRAM Far. ",
+        "BriefDescription": "Counts any request that accounts for data responses from DRAM Far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080808000 ",
+        "MSRValue": "0x0080808000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0100402000 ",
+        "MSRValue": "0x0100402000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.MCDRAM_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+        "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080202000 ",
+        "MSRValue": "0x0080202000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.MCDRAM_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0101002000 ",
+        "MSRValue": "0x0101002000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from DRAM Far. ",
+        "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from DRAM Far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080802000 ",
+        "MSRValue": "0x0080802000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0100401000 ",
+        "MSRValue": "0x0100401000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Software Prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+        "BriefDescription": "Counts Software Prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080201000 ",
+        "MSRValue": "0x0080201000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0101001000 ",
+        "MSRValue": "0x0101001000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Software Prefetches that accounts for data responses from DRAM Far. ",
+        "BriefDescription": "Counts Software Prefetches that accounts for data responses from DRAM Far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080801000 ",
+        "MSRValue": "0x0080801000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0100400400 ",
+        "MSRValue": "0x0100400400",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+        "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080200400 ",
+        "MSRValue": "0x0080200400",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0101000400 ",
+        "MSRValue": "0x0101000400",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from DRAM Far. ",
+        "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from DRAM Far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080800400 ",
+        "MSRValue": "0x0080800400",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0100400200 ",
+        "MSRValue": "0x0100400200",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts UC code reads (valid only for Outstanding response type)  that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+        "BriefDescription": "Counts UC code reads (valid only for Outstanding response type)  that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080200200 ",
+        "MSRValue": "0x0080200200",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0101000200 ",
+        "MSRValue": "0x0101000200",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts UC code reads (valid only for Outstanding response type)  that accounts for data responses from DRAM Far. ",
+        "BriefDescription": "Counts UC code reads (valid only for Outstanding response type)  that accounts for data responses from DRAM Far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080800200 ",
+        "MSRValue": "0x0080800200",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0100400100 ",
+        "MSRValue": "0x0100400100",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM_FAR",
         "MSRIndex": "0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+        "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080200100 ",
+        "MSRValue": "0x0080200100",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0101000100 ",
+        "MSRValue": "0x0101000100",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.DDR_FAR",
         "MSRIndex": "0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from DRAM Far. ",
+        "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from DRAM Far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080800100 ",
+        "MSRValue": "0x0080800100",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.DDR_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x2000020080 ",
+        "MSRValue": "0x2000020080",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.NON_DRAM",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0100400080 ",
+        "MSRValue": "0x0100400080",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type).  that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+        "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type).  that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080200080 ",
+        "MSRValue": "0x0080200080",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0101000080 ",
+        "MSRValue": "0x0101000080",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type).  that accounts for data responses from DRAM Far. ",
+        "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type).  that accounts for data responses from DRAM Far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080800080 ",
+        "MSRValue": "0x0080800080",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0100400040 ",
+        "MSRValue": "0x0100400040",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.MCDRAM_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+        "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080200040 ",
+        "MSRValue": "0x0080200040",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.MCDRAM_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0101000040 ",
+        "MSRValue": "0x0101000040",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from DRAM Far. ",
+        "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from DRAM Far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080800040 ",
+        "MSRValue": "0x0080800040",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x2000020020 ",
+        "MSRValue": "0x2000020020",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.NON_DRAM",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0100400020 ",
+        "MSRValue": "0x0100400020",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+        "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080200020 ",
+        "MSRValue": "0x0080200020",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0101000020 ",
+        "MSRValue": "0x0101000020",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from DRAM Far. ",
+        "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from DRAM Far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080800020 ",
+        "MSRValue": "0x0080800020",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0100400004 ",
+        "MSRValue": "0x0100400004",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+        "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080200004 ",
+        "MSRValue": "0x0080200004",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0101000004 ",
+        "MSRValue": "0x0101000004",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from DRAM Far. ",
+        "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from DRAM Far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080800004 ",
+        "MSRValue": "0x0080800004",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0100400002 ",
+        "MSRValue": "0x0100400002",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+        "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080200002 ",
+        "MSRValue": "0x0080200002",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0101000002 ",
+        "MSRValue": "0x0101000002",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from DRAM Far. ",
+        "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from DRAM Far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080800002 ",
+        "MSRValue": "0x0080800002",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0100400001 ",
+        "MSRValue": "0x0100400001",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+        "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080200001 ",
+        "MSRValue": "0x0080200001",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0101000001 ",
+        "MSRValue": "0x0101000001",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR_FAR",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from DRAM Far. ",
+        "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from DRAM Far.",
         "Offcore": "1"
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0080800001 ",
+        "MSRValue": "0x0080800001",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR_NEAR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0180600001 ",
+        "MSRValue": "0x0180600001",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0180600002 ",
+        "MSRValue": "0x0180600002",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0180600004 ",
+        "MSRValue": "0x0180600004",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0180600020 ",
+        "MSRValue": "0x0180600020",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0180600080 ",
+        "MSRValue": "0x0180600080",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0180600100 ",
+        "MSRValue": "0x0180600100",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0180600200 ",
+        "MSRValue": "0x0180600200",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0180600400 ",
+        "MSRValue": "0x0180600400",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0180601000 ",
+        "MSRValue": "0x0180601000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0180608000 ",
+        "MSRValue": "0x0180608000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0180603091 ",
+        "MSRValue": "0x0180603091",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0180600022 ",
+        "MSRValue": "0x0180600022",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0180600044 ",
+        "MSRValue": "0x0180600044",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x01806032f7 ",
+        "MSRValue": "0x01806032f7",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0180600070 ",
+        "MSRValue": "0x0180600070",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0181800001 ",
+        "MSRValue": "0x0181800001",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0181800002 ",
+        "MSRValue": "0x0181800002",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0181800004 ",
+        "MSRValue": "0x0181800004",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0181800020 ",
+        "MSRValue": "0x0181800020",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0181800040 ",
+        "MSRValue": "0x0181800040",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0181800080 ",
+        "MSRValue": "0x0181800080",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0181800200 ",
+        "MSRValue": "0x0181800200",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0181800400 ",
+        "MSRValue": "0x0181800400",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0181801000 ",
+        "MSRValue": "0x0181801000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0181802000 ",
+        "MSRValue": "0x0181802000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0181808000 ",
+        "MSRValue": "0x0181808000",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0181803091 ",
+        "MSRValue": "0x0181803091",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0181800022 ",
+        "MSRValue": "0x0181800022",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x0181800044 ",
+        "MSRValue": "0x0181800044",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR",
     },
     {
         "EventCode": "0xB7",
-        "MSRValue": "0x01818032f7 ",
+        "MSRValue": "0x01818032f7",
         "Counter": "0,1",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.ANY_READ.DDR",
index bb5494cfb5aed79da1a0135537614c26a75c5461..92e4ef2e22c62da98000026b0233df542b6bafdb 100644 (file)
         "BriefDescription": "Counts the number of micro-ops retired that are from the complex flows issued by the micro-sequencer (MS)."
     },
     {
-        "PublicDescription": "This event counts the number of micro-ops (uops) retired. The processor decodes complex macro instructions into a sequence of simpler uops. Most instructions are composed of one or two uops. Some instructions are decoded into longer sequences such as repeat instructions, floating point transcendental instructions, and assists. ",
+        "PublicDescription": "This event counts the number of micro-ops (uops) retired. The processor decodes complex macro instructions into a sequence of simpler uops. Most instructions are composed of one or two uops. Some instructions are decoded into longer sequences such as repeat instructions, floating point transcendental instructions, and assists.",
         "EventCode": "0xC2",
         "Counter": "0,1",
         "UMask": "0x10",
         "UMask": "0x20",
         "EventName": "NO_ALLOC_CYCLES.RAT_STALL",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Counts the number of core cycles when no micro-ops are allocated and a RATstall (caused by reservation station full) is asserted.  "
+        "BriefDescription": "Counts the number of core cycles when no micro-ops are allocated and a RATstall (caused by reservation station full) is asserted."
     },
     {
         "PublicDescription": "This event counts the number of core cycles when no uops are allocated, the instruction queue is empty and the alloc pipe is stalled waiting for instructions to be fetched.",
         "UMask": "0x1f",
         "EventName": "RS_FULL_STALL.ALL",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Counts the total number of core cycles the Alloc pipeline is stalled when any one of the reservation stations is full. "
+        "BriefDescription": "Counts the total number of core cycles the Alloc pipeline is stalled when any one of the reservation stations is full."
     },
     {
         "EventCode": "0xC0",
         "UMask": "0x1",
         "EventName": "CYCLES_DIV_BUSY.ALL",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles the number of core cycles when divider is busy.  Does not imply a stall waiting for the divider.  "
+        "BriefDescription": "Cycles the number of core cycles when divider is busy.  Does not imply a stall waiting for the divider."
     },
     {
         "PublicDescription": "This event counts the number of instructions that retire.  For instructions that consist of multiple micro-ops, this event counts exactly once, as the last micro-op of the instruction retires.  The event continues counting while instructions retire, including during interrupt service routines caused by hardware interrupts, faults or traps.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 1",
         "UMask": "0x1",
         "EventName": "INST_RETIRED.ANY",
         "BriefDescription": "Counts the number of unhalted reference clock cycles"
     },
     {
-        "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter\r\n",
-        "EventCode": "0x00",
+        "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter",
         "Counter": "Fixed counter 2",
         "UMask": "0x2",
         "EventName": "CPU_CLK_UNHALTED.THREAD",
         "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles"
     },
     {
-        "EventCode": "0x00",
         "Counter": "Fixed counter 3",
         "UMask": "0x3",
         "EventName": "CPU_CLK_UNHALTED.REF_TSC",
         "UMask": "0x1",
         "EventName": "RECYCLEQ.LD_BLOCK_ST_FORWARD",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Counts the number of occurences a retired load gets blocked because its address partially overlaps with a store ",
+        "BriefDescription": "Counts the number of occurences a retired load gets blocked because its address partially overlaps with a store",
         "Data_LA": "1"
     },
     {
index f31594507f8c60960f76c0bce8298002ae66b152..9e493977771f178c0f93365a4601310d64c09d3a 100644 (file)
@@ -36,7 +36,7 @@
         "EdgeDetect": "1"
     },
     {
-        "PublicDescription": "This event counts every cycle when an I-side (walks due to an instruction fetch) page walk is in progress. ",
+        "PublicDescription": "This event counts every cycle when an I-side (walks due to an instruction fetch) page walk is in progress.",
         "EventCode": "0x05",
         "Counter": "0,1",
         "UMask": "0x2",
index e05c2c8458fcb2c5645cd121f173267228905fcb..d6984a3017e06b609d5b69ce289b3441d0ae0b7c 100644 (file)
@@ -33,3 +33,4 @@ GenuineIntel-6-25,v2,westmereep-sp,core
 GenuineIntel-6-2F,v2,westmereex,core
 GenuineIntel-6-55-[01234],v1,skylakex,core
 GenuineIntel-6-55-[56789ABCDEF],v1,cascadelakex,core
+AuthenticAMD-23-[[:xdigit:]]+,v1,amdfam17h,core
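The new mapfile row matches every AMD family 17h model with a POSIX character class. A small sketch of how such a row could resolve a CPUID string, assuming nothing about perf's actual matcher (which is implemented in C under tools/perf); the [[:xdigit:]] class has to be translated for Python's re module:

import re

# The row added above: cpuid regex, table version, directory, event type.
row = ("AuthenticAMD-23-[[:xdigit:]]+", "v1", "amdfam17h", "core")

def row_matches(cpuid: str, pattern: str) -> bool:
    # Python's re lacks POSIX character classes, so expand [[:xdigit:]] by hand.
    regex = pattern.replace("[[:xdigit:]]", "[0-9a-fA-F]")
    return re.fullmatch(regex, cpuid) is not None

assert row_matches("AuthenticAMD-23-1", row[0])        # any family 17h model
assert not row_matches("GenuineIntel-6-55-4", row[0])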
index 16b04a20bc12ca424147ea104b2c6d92f55131bd..bb79e89c2049d272f2c840435effdb9ef1bf14ca 100644 (file)
 [
     {
-        "PEBS": "1",
-        "EventCode": "0xD0",
+        "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x11",
-        "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Retired load uops that miss the STLB.",
-        "CounterHTOff": "0,1,2,3"
+        "UMask": "0x1",
+        "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Demand Data Read requests that hit L2 cache.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "EventCode": "0xD0",
+        "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x12",
-        "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Retired store uops that miss the STLB.",
-        "CounterHTOff": "0,1,2,3"
+        "UMask": "0x3",
+        "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Demand Data Read requests.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "EventCode": "0xD0",
+        "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x21",
-        "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Retired load uops with locked access.",
-        "CounterHTOff": "0,1,2,3"
+        "UMask": "0x4",
+        "EventName": "L2_RQSTS.RFO_HIT",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "RFO requests that hit L2 cache.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "PublicDescription": "This event counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
-        "EventCode": "0xD0",
+        "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x41",
-        "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Retired load uops that split across a cacheline boundary.",
-        "CounterHTOff": "0,1,2,3"
+        "UMask": "0x8",
+        "EventName": "L2_RQSTS.RFO_MISS",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "RFO requests that miss L2 cache.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "PublicDescription": "This event counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
-        "EventCode": "0xD0",
+        "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x42",
-        "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Retired store uops that split across a cacheline boundary.",
-        "CounterHTOff": "0,1,2,3"
+        "UMask": "0xc",
+        "EventName": "L2_RQSTS.ALL_RFO",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "RFO requests to L2 cache.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "PublicDescription": "This event counts the number of load uops retired",
-        "EventCode": "0xD0",
+        "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x81",
-        "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "All retired load uops.",
-        "CounterHTOff": "0,1,2,3"
+        "UMask": "0x10",
+        "EventName": "L2_RQSTS.CODE_RD_HIT",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "PublicDescription": "This event counts the number of store uops retired.",
-        "EventCode": "0xD0",
+        "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x82",
-        "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "All retired store uops.",
-        "CounterHTOff": "0,1,2,3"
+        "UMask": "0x20",
+        "EventName": "L2_RQSTS.CODE_RD_MISS",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "L2 cache misses when fetching instructions.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "EventCode": "0xD1",
+        "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
-        "CounterHTOff": "0,1,2,3"
+        "UMask": "0x30",
+        "EventName": "L2_RQSTS.ALL_CODE_RD",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "L2 code requests.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "EventCode": "0xD1",
+        "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
-        "CounterHTOff": "0,1,2,3"
+        "UMask": "0x40",
+        "EventName": "L2_RQSTS.PF_HIT",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "PublicDescription": "This event counts retired load uops that hit in the last-level (L3) cache without snoops required.",
-        "EventCode": "0xD1",
+        "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x4",
-        "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
-        "SampleAfterValue": "50021",
-        "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
-        "CounterHTOff": "0,1,2,3"
+        "UMask": "0x80",
+        "EventName": "L2_RQSTS.PF_MISS",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "EventCode": "0xD1",
+        "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x40",
-        "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
-        "CounterHTOff": "0,1,2,3"
+        "UMask": "0xc0",
+        "EventName": "L2_RQSTS.ALL_PF",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Requests from L2 hardware prefetchers.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "EventCode": "0xD2",
+        "EventCode": "0x27",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
-        "SampleAfterValue": "20011",
-        "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
-        "CounterHTOff": "0,1,2,3"
-    },
-    {
-        "PEBS": "1",
-        "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package).  Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line.  In this case, a snoop was required, and another L2 had the line in a non-modified state.",
-        "EventCode": "0xD2",
-        "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
-        "SampleAfterValue": "20011",
-        "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
-        "CounterHTOff": "0,1,2,3"
+        "EventName": "L2_STORE_LOCK_RQSTS.MISS",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "RFOs that miss cache lines.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package).  Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line.  In this case, a snoop was required, and another L2 had the line in a modified state, so the line had to be invalidated in that L2 cache and transferred to the requesting L2.",
-        "EventCode": "0xD2",
+        "EventCode": "0x27",
         "Counter": "0,1,2,3",
         "UMask": "0x4",
-        "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
-        "SampleAfterValue": "20011",
-        "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
-        "CounterHTOff": "0,1,2,3"
+        "EventName": "L2_STORE_LOCK_RQSTS.HIT_E",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "RFOs that hit cache lines in E state.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "EventCode": "0xD2",
+        "EventCode": "0x27",
         "Counter": "0,1,2,3",
         "UMask": "0x8",
-        "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
-        "CounterHTOff": "0,1,2,3"
+        "EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "RFOs that hit cache lines in M state.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "PublicDescription": "This event counts retired demand loads that missed the  last-level (L3) cache. This means that the load is usually satisfied from memory in a client system or possibly from the remote socket in a server. Demand loads are non speculative load uops.",
-        "EventCode": "0xD4",
+        "EventCode": "0x27",
         "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Retired load uops with unknown information as data source in cache serviced the load.",
-        "CounterHTOff": "0,1,2,3"
+        "UMask": "0xf",
+        "EventName": "L2_STORE_LOCK_RQSTS.ALL",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "RFOs that access cache lines in any state.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts L1D data line replacements.  Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier.  ",
-        "EventCode": "0x51",
+        "EventCode": "0x28",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "L1D.REPLACEMENT",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "L1D data line replacements.",
+        "EventName": "L2_L1D_WB_RQSTS.MISS",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.).",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x51",
+        "EventCode": "0x28",
         "Counter": "0,1,2,3",
         "UMask": "0x2",
-        "EventName": "L1D.ALLOCATED_IN_M",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Allocated L1D data cache lines in M state.",
+        "EventName": "L2_L1D_WB_RQSTS.HIT_S",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in S state.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x51",
+        "EventCode": "0x28",
         "Counter": "0,1,2,3",
         "UMask": "0x4",
-        "EventName": "L1D.EVICTION",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "L1D data cache lines in M state evicted due to replacement.",
+        "EventName": "L2_L1D_WB_RQSTS.HIT_E",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x51",
+        "EventCode": "0x28",
         "Counter": "0,1,2,3",
         "UMask": "0x8",
-        "EventName": "L1D.ALL_M_REPLACEMENT",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Cache lines in M state evicted out of L1D due to Snoop HitM or dirty line replacement.",
+        "EventName": "L2_L1D_WB_RQSTS.HIT_M",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
+    {
+        "EventCode": "0x28",
+        "Counter": "0,1,2,3",
+        "UMask": "0xf",
+        "EventName": "L2_L1D_WB_RQSTS.ALL",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
+    {
+        "EventCode": "0x2E",
+        "Counter": "0,1,2,3",
+        "UMask": "0x41",
+        "EventName": "LONGEST_LAT_CACHE.MISS",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Core-originated cacheable demand requests missed LLC.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
+    {
+        "EventCode": "0x2E",
+        "Counter": "0,1,2,3",
+        "UMask": "0x4f",
+        "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Core-originated cacheable demand requests that refer to LLC.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "CounterHTOff": "2"
     },
     {
-        "EventCode": "0x63",
+        "EventCode": "0x48",
+        "Counter": "2",
+        "UMask": "0x1",
+        "AnyThread": "1",
+        "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+        "CounterMask": "1",
+        "CounterHTOff": "2"
+    },
+    {
+        "EventCode": "0x48",
         "Counter": "0,1,2,3",
         "UMask": "0x2",
-        "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+        "EventName": "L1D_PEND_MISS.FB_FULL",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles when L1D is locked.",
+        "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+        "CounterMask": "1",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
+    {
+        "PublicDescription": "This event counts L1D data line replacements.  Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier.",
+        "EventCode": "0x51",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "L1D.REPLACEMENT",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "L1D data line replacements.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
+    {
+        "EventCode": "0x51",
+        "Counter": "0,1,2,3",
+        "UMask": "0x2",
+        "EventName": "L1D.ALLOCATED_IN_M",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Allocated L1D data cache lines in M state.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
+    {
+        "EventCode": "0x51",
+        "Counter": "0,1,2,3",
+        "UMask": "0x4",
+        "EventName": "L1D.EVICTION",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "L1D data cache lines in M state evicted due to replacement.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
+    {
+        "EventCode": "0x51",
+        "Counter": "0,1,2,3",
+        "UMask": "0x8",
+        "EventName": "L1D.ALL_M_REPLACEMENT",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Cache lines in M state evicted out of L1D due to Snoop HitM or dirty line replacement.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "CounterMask": "1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
+    {
+        "EventCode": "0x60",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_C6",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+        "CounterMask": "6",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
     {
         "EventCode": "0x60",
         "Counter": "0,1,2,3",
         "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
+    {
+        "EventCode": "0x60",
+        "Counter": "0,1,2,3",
+        "UMask": "0x4",
+        "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+        "CounterMask": "1",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
     {
         "EventCode": "0x60",
         "Counter": "0,1,2,3",
         "CounterMask": "1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
+    {
+        "EventCode": "0x63",
+        "Counter": "0,1,2,3",
+        "UMask": "0x2",
+        "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Cycles when L1D is locked.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
     {
         "EventCode": "0xB0",
         "Counter": "0,1,2,3",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x24",
+        "EventCode": "0xBF",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Demand Data Read requests that hit L2 cache.",
+        "UMask": "0x5",
+        "EventName": "L1D_BLOCKS.BANK_CONFLICT_CYCLES",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Cycles when dispatched loads are cancelled due to L1D bank conflicts with other load ports.",
+        "CounterMask": "1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x24",
+        "PEBS": "1",
+        "EventCode": "0xD0",
         "Counter": "0,1,2,3",
-        "UMask": "0x4",
-        "EventName": "L2_RQSTS.RFO_HIT",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "RFO requests that hit L2 cache.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "UMask": "0x11",
+        "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS).",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0x24",
+        "PEBS": "1",
+        "EventCode": "0xD0",
         "Counter": "0,1,2,3",
-        "UMask": "0x8",
-        "EventName": "L2_RQSTS.RFO_MISS",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "RFO requests that miss L2 cache.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "UMask": "0x12",
+        "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS).",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PEBS": "1",
+        "EventCode": "0xD0",
+        "Counter": "0,1,2,3",
+        "UMask": "0x21",
+        "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+        "SampleAfterValue": "100007",
+        "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS).",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0x24",
+        "PEBS": "1",
+        "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K). (Precise Event - PEBS)",
+        "EventCode": "0xD0",
         "Counter": "0,1,2,3",
-        "UMask": "0x10",
-        "EventName": "L2_RQSTS.CODE_RD_HIT",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "UMask": "0x41",
+        "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Retired load uops that split across a cacheline boundary. (Precise Event - PEBS).",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0x24",
+        "PEBS": "1",
+        "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K). (Precise Event - PEBS)",
+        "EventCode": "0xD0",
         "Counter": "0,1,2,3",
-        "UMask": "0x20",
-        "EventName": "L2_RQSTS.CODE_RD_MISS",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "L2 cache misses when fetching instructions.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "UMask": "0x42",
+        "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS).",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0x24",
+        "PEBS": "1",
+        "PublicDescription": "This event counts the number of load uops retired (Precise Event)",
+        "EventCode": "0xD0",
         "Counter": "0,1,2,3",
-        "UMask": "0x40",
-        "EventName": "L2_RQSTS.PF_HIT",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "UMask": "0x81",
+        "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "All retired load uops. (Precise Event - PEBS).",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0x24",
+        "PEBS": "1",
+        "PublicDescription": "This event counts the number of store uops retired. (Precise Event - PEBS)",
+        "EventCode": "0xD0",
         "Counter": "0,1,2,3",
-        "UMask": "0x80",
-        "EventName": "L2_RQSTS.PF_MISS",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "UMask": "0x82",
+        "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "All retired store uops. (Precise Event - PEBS).",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0x27",
+        "PEBS": "1",
+        "EventCode": "0xD1",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "L2_STORE_LOCK_RQSTS.MISS",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "RFOs that miss cache lines.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS).",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0x27",
+        "PEBS": "1",
+        "EventCode": "0xD1",
         "Counter": "0,1,2,3",
-        "UMask": "0x4",
-        "EventName": "L2_STORE_LOCK_RQSTS.HIT_E",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "RFOs that hit cache lines in E state.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "UMask": "0x2",
+        "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS).",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0x27",
+        "PEBS": "1",
+        "PublicDescription": "This event counts retired load uops that hit in the last-level (L3) cache without snoops required. (Precise Event - PEBS)",
+        "EventCode": "0xD1",
         "Counter": "0,1,2,3",
-        "UMask": "0x8",
-        "EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "RFOs that hit cache lines in M state.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "UMask": "0x4",
+        "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
+        "SampleAfterValue": "50021",
+        "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required. (Precise Event - PEBS).",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0x27",
+        "PEBS": "1",
+        "EventCode": "0xD1",
         "Counter": "0,1,2,3",
-        "UMask": "0xf",
-        "EventName": "L2_STORE_LOCK_RQSTS.ALL",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "RFOs that access cache lines in any state.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "UMask": "0x40",
+        "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS).",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0x28",
+        "PEBS": "1",
+        "EventCode": "0xD2",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "L2_L1D_WB_RQSTS.MISS",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.).",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
+        "SampleAfterValue": "20011",
+        "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS).",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0x28",
+        "PEBS": "1",
+        "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package).  Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line.  In this case, a snoop was required, and another L2 had the line in a non-modified state. (Precise Event - PEBS)",
+        "EventCode": "0xD2",
         "Counter": "0,1,2,3",
         "UMask": "0x2",
-        "EventName": "L2_L1D_WB_RQSTS.HIT_S",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in S state.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
+        "SampleAfterValue": "20011",
+        "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS).",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0x28",
+        "PEBS": "1",
+        "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package).  Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line.  In this case, a snoop was required, and another L2 had the line in a modified state, so the line had to be invalidated in that L2 cache and transferred to the requesting L2. (Precise Event - PEBS)",
+        "EventCode": "0xD2",
         "Counter": "0,1,2,3",
         "UMask": "0x4",
-        "EventName": "L2_L1D_WB_RQSTS.HIT_E",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
+        "SampleAfterValue": "20011",
+        "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC. (Precise Event - PEBS).",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0x28",
+        "PEBS": "1",
+        "EventCode": "0xD2",
         "Counter": "0,1,2,3",
         "UMask": "0x8",
-        "EventName": "L2_L1D_WB_RQSTS.HIT_M",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required. (Precise Event - PEBS).",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0x28",
+        "PEBS": "1",
+        "PublicDescription": "This event counts retired demand loads that missed the  last-level (L3) cache. This means that the load is usually satisfied from memory in a client system or possibly from the remote socket in a server. Demand loads are non speculative load uops. (Precise Event - PEBS)",
+        "EventCode": "0xD4",
         "Counter": "0,1,2,3",
-        "UMask": "0xf",
-        "EventName": "L2_L1D_WB_RQSTS.ALL",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "UMask": "0x2",
+        "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS",
+        "SampleAfterValue": "100007",
+        "BriefDescription": "Retired load uops with unknown information as data source in cache serviced the load. (Precise Event - PEBS).",
+        "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xF0",
         "BriefDescription": "Dirty L2 cache lines filling the L2.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
-    {
-        "EventCode": "0x2E",
-        "Counter": "0,1,2,3",
-        "UMask": "0x41",
-        "EventName": "LONGEST_LAT_CACHE.MISS",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Core-originated cacheable demand requests missed LLC.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0x2E",
-        "Counter": "0,1,2,3",
-        "UMask": "0x4f",
-        "EventName": "LONGEST_LAT_CACHE.REFERENCE",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Core-originated cacheable demand requests that refer to LLC.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
     {
         "EventCode": "0xF4",
         "Counter": "0,1,2,3",
         "BriefDescription": "Split locks in SQ.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
-    {
-        "EventCode": "0x24",
-        "Counter": "0,1,2,3",
-        "UMask": "0x3",
-        "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Demand Data Read requests.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0x24",
-        "Counter": "0,1,2,3",
-        "UMask": "0xc",
-        "EventName": "L2_RQSTS.ALL_RFO",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "RFO requests to L2 cache.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0x24",
-        "Counter": "0,1,2,3",
-        "UMask": "0x30",
-        "EventName": "L2_RQSTS.ALL_CODE_RD",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "L2 code requests.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0x24",
-        "Counter": "0,1,2,3",
-        "UMask": "0xc0",
-        "EventName": "L2_RQSTS.ALL_PF",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Requests from L2 hardware prefetchers.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0xBF",
-        "Counter": "0,1,2,3",
-        "UMask": "0x5",
-        "EventName": "L1D_BLOCKS.BANK_CONFLICT_CYCLES",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Cycles when dispatched loads are cancelled due to L1D bank conflicts with other load ports.",
-        "CounterMask": "1",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0x60",
-        "Counter": "0,1,2,3",
-        "UMask": "0x4",
-        "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
-        "CounterMask": "1",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0x60",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_C6",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
-        "CounterMask": "6",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0x48",
-        "Counter": "2",
-        "UMask": "0x1",
-        "AnyThread": "1",
-        "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
-        "CounterMask": "1",
-        "CounterHTOff": "2"
-    },
-    {
-        "EventCode": "0x48",
-        "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "L1D_PEND_MISS.FB_FULL",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
-        "CounterMask": "1",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
     {
         "EventCode": "0xB7, 0xBB",
         "MSRValue": "0x10003c0244",
         "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": " REQUEST = DATA_INTO_CORE and RESPONSE = ANY_RESPONSE",
+        "BriefDescription": "REQUEST = DATA_INTO_CORE and RESPONSE = ANY_RESPONSE",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_M.HITM",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": " REQUEST = DEMAND_RFO and RESPONSE = LLC_HIT_M and SNOOP = HITM",
+        "BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = LLC_HIT_M and SNOOP = HITM",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": " REQUEST = PF_RFO and RESPONSE = ANY_RESPONSE",
+        "BriefDescription": "REQUEST = PF_RFO and RESPONSE = ANY_RESPONSE",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventName": "OFFCORE_RESPONSE.PF_L_DATA_RD.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": " REQUEST = PF_LLC_DATA_RD and RESPONSE = ANY_RESPONSE",
+        "BriefDescription": "REQUEST = PF_LLC_DATA_RD and RESPONSE = ANY_RESPONSE",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventName": "OFFCORE_RESPONSE.PF_L_IFETCH.ANY_RESPONSE",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": " REQUEST = PF_LLC_IFETCH and RESPONSE = ANY_RESPONSE",
+        "BriefDescription": "REQUEST = PF_LLC_IFETCH and RESPONSE = ANY_RESPONSE",
         "CounterHTOff": "0,1,2,3"
     }
 ]
\ No newline at end of file
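For context on how entries like the OFFCORE_RESPONSE ones above get consumed: the EventCode/UMask pair programs the counter, and MSRValue programs the offcore response MSR (0x1a6/0x1a7). A hedged Python sketch that builds a perf event specifier from one entry; the hand-made entry below and the offcore_rsp format attribute are assumptions (the attribute exists on many Intel PMUs, but check /sys/devices/cpu/format on the target):

def to_perf_spec(event: dict) -> str:
    # Translate one JSON event entry into a raw perf event specifier.
    spec = f"cpu/event={event['EventCode']},umask={event['UMask']}"
    if "MSRValue" in event:
        # Offcore response events additionally program MSR 0x1a6/0x1a7.
        spec += f",offcore_rsp={event['MSRValue']}"
    return spec + "/"

entry = {"EventCode": "0xB7", "UMask": "0x1", "MSRValue": "0x10003c0244"}
print(to_perf_spec(entry))   # cpu/event=0xB7,umask=0x1,offcore_rsp=0x10003c0244/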
index 982eda48785ec8781ca3ffa1811779a32a6696db..ce26537c7d47f912c70bbbb906a1cc9b6b708c76 100644 (file)
@@ -1,67 +1,4 @@
 [
-    {
-        "EventCode": "0xC1",
-        "Counter": "0,1,2,3",
-        "UMask": "0x8",
-        "EventName": "OTHER_ASSISTS.AVX_STORE",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0xC1",
-        "Counter": "0,1,2,3",
-        "UMask": "0x10",
-        "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0xC1",
-        "Counter": "0,1,2,3",
-        "UMask": "0x20",
-        "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0xCA",
-        "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "FP_ASSIST.X87_OUTPUT",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Number of X87 assists due to output value.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0xCA",
-        "Counter": "0,1,2,3",
-        "UMask": "0x4",
-        "EventName": "FP_ASSIST.X87_INPUT",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Number of X87 assists due to input value.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0xCA",
-        "Counter": "0,1,2,3",
-        "UMask": "0x8",
-        "EventName": "FP_ASSIST.SIMD_OUTPUT",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Number of SIMD FP assists due to Output values.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0xCA",
-        "Counter": "0,1,2,3",
-        "UMask": "0x10",
-        "EventName": "FP_ASSIST.SIMD_INPUT",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Number of SIMD FP assists due to input values.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
     {
         "EventCode": "0x10",
         "Counter": "0,1,2,3",
         "BriefDescription": "Number of AVX-256 Computational FP double precision uops issued this cycle.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
+    {
+        "EventCode": "0xC1",
+        "Counter": "0,1,2,3",
+        "UMask": "0x8",
+        "EventName": "OTHER_ASSISTS.AVX_STORE",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
+    {
+        "EventCode": "0xC1",
+        "Counter": "0,1,2,3",
+        "UMask": "0x10",
+        "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
+    {
+        "EventCode": "0xC1",
+        "Counter": "0,1,2,3",
+        "UMask": "0x20",
+        "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
+    {
+        "EventCode": "0xCA",
+        "Counter": "0,1,2,3",
+        "UMask": "0x2",
+        "EventName": "FP_ASSIST.X87_OUTPUT",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Number of X87 assists due to output value.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
+    {
+        "EventCode": "0xCA",
+        "Counter": "0,1,2,3",
+        "UMask": "0x4",
+        "EventName": "FP_ASSIST.X87_INPUT",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Number of X87 assists due to input value.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
+    {
+        "EventCode": "0xCA",
+        "Counter": "0,1,2,3",
+        "UMask": "0x8",
+        "EventName": "FP_ASSIST.SIMD_OUTPUT",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Number of SIMD FP assists due to Output values.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
+    {
+        "EventCode": "0xCA",
+        "Counter": "0,1,2,3",
+        "UMask": "0x10",
+        "EventName": "FP_ASSIST.SIMD_INPUT",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Number of SIMD FP assists due to input values.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
     {
         "EventCode": "0xCA",
         "Counter": "0,1,2,3",
index 1b7b1dd36c68ec25614a836aa6a0387bd92ad94f..e58ed14a204cc8dc69e64eaaeeebb4b6861f41e3 100644 (file)
@@ -1,23 +1,4 @@
 [
-    {
-        "EventCode": "0x80",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "ICACHE.HIT",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes unchacheable accesses.",
-        "EventCode": "0x80",
-        "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "ICACHE.MISSES",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Instruction cache, streaming buffer and victim cache misses.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
     {
         "EventCode": "0x79",
         "Counter": "0,1,2,3",
     {
         "EventCode": "0x79",
         "Counter": "0,1,2,3",
-        "UMask": "0x8",
-        "EventName": "IDQ.DSB_UOPS",
+        "UMask": "0x4",
+        "EventName": "IDQ.MITE_CYCLES",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+        "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+        "CounterMask": "1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x79",
         "Counter": "0,1,2,3",
-        "UMask": "0x10",
-        "EventName": "IDQ.MS_DSB_UOPS",
+        "UMask": "0x8",
+        "EventName": "IDQ.DSB_UOPS",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+        "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x79",
         "Counter": "0,1,2,3",
-        "UMask": "0x20",
-        "EventName": "IDQ.MS_MITE_UOPS",
+        "UMask": "0x8",
+        "EventName": "IDQ.DSB_CYCLES",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+        "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+        "CounterMask": "1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x79",
         "Counter": "0,1,2,3",
-        "UMask": "0x30",
-        "EventName": "IDQ.MS_UOPS",
+        "UMask": "0x10",
+        "EventName": "IDQ.MS_DSB_UOPS",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+        "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the front-end in delivering uops.  Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder.  Using other instructions, if possible, will usually improve performance.  See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more information.",
         "EventCode": "0x79",
         "Counter": "0,1,2,3",
-        "UMask": "0x30",
-        "EventName": "IDQ.MS_CYCLES",
+        "UMask": "0x10",
+        "EventName": "IDQ.MS_DSB_CYCLES",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+        "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
         "CounterMask": "1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts the number of uops not delivered to the back-end per cycle, per thread, when the back-end was not stalled.  In the ideal case 4 uops can be delivered each cycle.  The event counts the undelivered uops - so if 3 were delivered in one cycle, the counter would be incremented by 1 for that cycle (4 - 3). If the back-end is stalled, the count for this event is not incremented even when uops were not delivered, because the back-end would not have been able to accept them.  This event is used in determining the front-end bound category of the top-down pipeline slots characterization.",
-        "EventCode": "0x9C",
+        "EventCode": "0x79",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+        "UMask": "0x10",
+        "EdgeDetect": "1",
+        "EventName": "IDQ.MS_DSB_OCCUR",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled .",
-        "CounterHTOff": "0,1,2,3"
+        "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
+        "CounterMask": "1",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x9C",
+        "EventCode": "0x79",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+        "UMask": "0x18",
+        "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+        "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops.",
         "CounterMask": "4",
-        "CounterHTOff": "0,1,2,3"
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x9C",
+        "EventCode": "0x79",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+        "UMask": "0x18",
+        "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
-        "CounterMask": "3",
-        "CounterHTOff": "0,1,2,3"
+        "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop.",
+        "CounterMask": "1",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xAB",
+        "EventCode": "0x79",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "DSB2MITE_SWITCHES.COUNT",
+        "UMask": "0x20",
+        "EventName": "IDQ.MS_MITE_UOPS",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches.",
+        "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts the cycles attributed to a switch from the Decoded Stream Buffer (DSB), which holds decoded instructions, to the legacy decode pipeline.  It excludes cycles when the back-end cannot  accept new micro-ops.  The penalty for these switches is potentially several cycles of instruction starvation, where no micro-ops are delivered to the back-end.",
-        "EventCode": "0xAB",
+        "EventCode": "0x79",
         "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+        "UMask": "0x24",
+        "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+        "BriefDescription": "Cycles MITE is delivering 4 Uops.",
+        "CounterMask": "4",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xAC",
+        "EventCode": "0x79",
         "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "DSB_FILL.OTHER_CANCEL",
+        "UMask": "0x24",
+        "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cases of cancelling valid DSB fill not because of exceeding way limit.",
+        "BriefDescription": "Cycles MITE is delivering any Uop.",
+        "CounterMask": "1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xAC",
+        "EventCode": "0x79",
         "Counter": "0,1,2,3",
-        "UMask": "0x8",
-        "EventName": "DSB_FILL.EXCEED_DSB_LINES",
+        "UMask": "0x30",
+        "EventName": "IDQ.MS_UOPS",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines.",
+        "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
+        "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the front-end in delivering uops.  Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder.  Using other instructions, if possible, will usually improve performance.  See the Intel\u00ae 64 and IA-32 Architectures Optimization Reference Manual for more information.",
         "EventCode": "0x79",
         "Counter": "0,1,2,3",
-        "UMask": "0x4",
-        "EventName": "IDQ.MITE_CYCLES",
+        "UMask": "0x30",
+        "EventName": "IDQ.MS_CYCLES",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+        "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
         "CounterMask": "1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x79",
         "Counter": "0,1,2,3",
-        "UMask": "0x8",
-        "EventName": "IDQ.DSB_CYCLES",
+        "UMask": "0x30",
+        "EdgeDetect": "1",
+        "EventName": "IDQ.MS_SWITCHES",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+        "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
         "CounterMask": "1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x79",
         "Counter": "0,1,2,3",
-        "UMask": "0x10",
-        "EventName": "IDQ.MS_DSB_CYCLES",
+        "UMask": "0x3c",
+        "EventName": "IDQ.MITE_ALL_UOPS",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
-        "CounterMask": "1",
+        "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x79",
+        "EventCode": "0x80",
         "Counter": "0,1,2,3",
-        "UMask": "0x10",
-        "EdgeDetect": "1",
-        "EventName": "IDQ.MS_DSB_OCCUR",
+        "UMask": "0x1",
+        "EventName": "ICACHE.HIT",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
-        "CounterMask": "1",
+        "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads, both cacheable and noncacheable, including UC fetches.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
+    {
+        "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes uncacheable accesses.",
+        "EventCode": "0x80",
+        "Counter": "0,1,2,3",
+        "UMask": "0x2",
+        "EventName": "ICACHE.MISSES",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Instruction cache, streaming buffer and victim cache misses.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
+    {
+        "PublicDescription": "This event counts the number of uops not delivered to the back-end per cycle, per thread, when the back-end was not stalled.  In the ideal case 4 uops can be delivered each cycle.  The event counts the undelivered uops - so if 3 were delivered in one cycle, the counter would be incremented by 1 for that cycle (4 - 3). If the back-end is stalled, the count for this event is not incremented even when uops were not delivered, because the back-end would not have been able to accept them.  This event is used in determining the front-end bound category of the top-down pipeline slots characterization.",
+        "EventCode": "0x9C",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled.",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "EventCode": "0x9C",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+        "CounterMask": "4",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "EventCode": "0x9C",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+        "CounterMask": "3",
+        "CounterHTOff": "0,1,2,3"
+    },
     {
         "EventCode": "0x9C",
         "Counter": "0,1,2,3",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0x79",
-        "Counter": "0,1,2,3",
-        "UMask": "0x18",
-        "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops.",
-        "CounterMask": "4",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0x79",
+        "EventCode": "0x9C",
+        "Invert": "1",
         "Counter": "0,1,2,3",
-        "UMask": "0x18",
-        "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+        "UMask": "0x1",
+        "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop.",
+        "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
         "CounterMask": "1",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0x79",
+        "EventCode": "0xAB",
         "Counter": "0,1,2,3",
-        "UMask": "0x24",
-        "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+        "UMask": "0x1",
+        "EventName": "DSB2MITE_SWITCHES.COUNT",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles MITE is delivering 4 Uops.",
-        "CounterMask": "4",
+        "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x79",
+        "PublicDescription": "This event counts the cycles attributed to a switch from the Decoded Stream Buffer (DSB), which holds decoded instructions, to the legacy decode pipeline.  It excludes cycles when the back-end cannot  accept new micro-ops.  The penalty for these switches is potentially several cycles of instruction starvation, where no micro-ops are delivered to the back-end.",
+        "EventCode": "0xAB",
         "Counter": "0,1,2,3",
-        "UMask": "0x24",
-        "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+        "UMask": "0x2",
+        "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles MITE is delivering any Uop.",
-        "CounterMask": "1",
+        "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xAC",
         "Counter": "0,1,2,3",
-        "UMask": "0xa",
-        "EventName": "DSB_FILL.ALL_CANCEL",
+        "UMask": "0x2",
+        "EventName": "DSB_FILL.OTHER_CANCEL",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cases of cancelling valid Decode Stream Buffer (DSB) fill not because of exceeding way limit.",
+        "BriefDescription": "Cases of cancelling valid DSB fill not because of exceeding way limit.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x9C",
-        "Invert": "1",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
-        "CounterMask": "1",
-        "CounterHTOff": "0,1,2,3"
-    },
-    {
-        "EventCode": "0x79",
+        "EventCode": "0xAC",
         "Counter": "0,1,2,3",
-        "UMask": "0x3c",
-        "EventName": "IDQ.MITE_ALL_UOPS",
+        "UMask": "0x8",
+        "EventName": "DSB_FILL.EXCEED_DSB_LINES",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
+        "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounters more than 3 Decode Stream Buffer (DSB) lines.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x79",
+        "EventCode": "0xAC",
         "Counter": "0,1,2,3",
-        "UMask": "0x30",
-        "EdgeDetect": "1",
-        "EventName": "IDQ.MS_SWITCHES",
+        "UMask": "0xa",
+        "EventName": "DSB_FILL.ALL_CANCEL",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
-        "CounterMask": "1",
+        "BriefDescription": "Cases of cancelling valid Decode Stream Buffer (DSB) fill not because of exceeding way limit.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     }
 ]
\ No newline at end of file
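The IDQ_UOPS_NOT_DELIVERED.CORE description above spells out the issue-slot arithmetic: 4 slots per cycle, the event counts the slots left unfilled, and nothing is counted while the back-end is stalled. Below is a minimal sketch of the top-down "front-end bound" estimate that description refers to, assuming raw counts for IDQ_UOPS_NOT_DELIVERED.CORE and CPU_CLK_UNHALTED.THREAD were collected elsewhere (e.g. via perf stat) and that the 4-wide issue width applies; the helper is illustrative, not part of these event files.

    def frontend_bound_fraction(undelivered_slots, unhalted_cycles, issue_width=4):
        """Undelivered issue slots divided by total issue slots.

        undelivered_slots -- raw IDQ_UOPS_NOT_DELIVERED.CORE count
        unhalted_cycles   -- raw CPU_CLK_UNHALTED.THREAD count
        """
        return undelivered_slots / (issue_width * unhalted_cycles)

    # Per the description, delivering 3 uops in a cycle adds 4 - 3 = 1 to the event.
    # For example, 600000 undelivered slots over 1000000 unhalted cycles:
    print(frontend_bound_fraction(600000, 1000000))  # 0.15, i.e. ~15% front-end bound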
index e6dfa89d00f3f8d78143f75fcbe68e1e520237a3..78c1a987f9a2294a4194d3cdb30089a46c9d2908 100644 (file)
@@ -1,4 +1,31 @@
 [
+    {
+        "EventCode": "0x05",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "MISALIGN_MEM_REF.LOADS",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
+    {
+        "EventCode": "0x05",
+        "Counter": "0,1,2,3",
+        "UMask": "0x2",
+        "EventName": "MISALIGN_MEM_REF.STORES",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
+    {
+        "EventCode": "0xBE",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "PAGE_WALKS.LLC_MISS",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Number of any page walk that had a miss in LLC. Does not necessarily cause a SUSPEND.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
     {
         "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from memory disambiguation, external snoops, or cross SMT-HW-thread snoop (stores) hitting load buffers.  Machine clears can have a significant performance impact if they are happening frequently.",
         "EventCode": "0xC3",
         "TakenAlone": "1",
         "CounterHTOff": "3"
     },
-    {
-        "EventCode": "0xBE",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "PAGE_WALKS.LLC_MISS",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Number of any page walk that had a miss in LLC. Does not necessary cause a SUSPEND.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0x05",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "MISALIGN_MEM_REF.LOADS",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0x05",
-        "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "MISALIGN_MEM_REF.STORES",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
     {
         "EventCode": "0xB7, 0xBB",
         "MSRValue": "0x300400244",
         "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_MISS_LOCAL.DRAM",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": " REQUEST = ANY_REQUEST and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+        "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventName": "OFFCORE_RESPONSE.DATA_IN_SOCKET.LLC_MISS_LOCAL.ANY_LLC_HIT",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": " REQUEST = DATA_IN_SOCKET and RESPONSE = LLC_MISS_LOCAL and SNOOP = ANY_LLC_HIT",
+        "BriefDescription": "REQUEST = DATA_IN_SOCKET and RESPONSE = LLC_MISS_LOCAL and SNOOP = ANY_LLC_HIT",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_MISS_LOCAL.DRAM",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": " REQUEST = DEMAND_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+        "BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_MISS_LOCAL.DRAM",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": " REQUEST = PF_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+        "BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_MISS_LOCAL.DRAM",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": " REQUEST = PF_RFO and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+        "BriefDescription": "REQUEST = PF_RFO and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventName": "OFFCORE_RESPONSE.PF_L_DATA_RD.LLC_MISS_LOCAL.DRAM",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": " REQUEST = PF_LLC_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+        "BriefDescription": "REQUEST = PF_LLC_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventName": "OFFCORE_RESPONSE.PF_L_IFETCH.LLC_MISS_LOCAL.DRAM",
         "MSRIndex": "0x1a6,0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": " REQUEST = PF_LLC_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+        "BriefDescription": "REQUEST = PF_LLC_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
         "CounterHTOff": "0,1,2,3"
     }
 ]
\ No newline at end of file
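These tables are flat JSON arrays of string-valued records, and as the markers above note, the files end without a trailing newline (json.load copes with that). Below is a minimal sketch of loading such a file and looking up one of the off-core response events edited above, assuming a Sandy Bridge-style path under pmu-events/arch/x86/ (the path is a guess; the field names match the records shown).

    import json

    def load_events(path):
        """Index a pmu-events JSON array by EventName."""
        with open(path) as f:
            return {e["EventName"]: e for e in json.load(f)}

    # Hypothetical location; substitute the file this hunk actually belongs to.
    events = load_events("pmu-events/arch/x86/sandybridge/memory.json")
    ev = events["OFFCORE_RESPONSE.ANY_REQUEST.LLC_MISS_LOCAL.DRAM"]
    # Off-core response events carry MSR programming details alongside EventCode.
    print(ev["EventCode"], ev["MSRIndex"], ev["MSRValue"])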
index 64b195b82c502c8b37fe2b06aada950b38c16446..874eb40a2e0f737c5a04ef72750de9bfb5ae3b1e 100644 (file)
@@ -8,6 +8,15 @@
         "BriefDescription": "Valid instructions written to IQ per cycle.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
+    {
+        "EventCode": "0x4E",
+        "Counter": "0,1,2,3",
+        "UMask": "0x2",
+        "EventName": "HW_PRE_REQ.DL1_MISS",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Hardware Prefetch requests that miss the L1D cache. This accounts for both L1 streamer and IP-based (IPP) HW prefetchers. A request is counted each time it accesses the cache and misses it, including if a block is applicable or if it hits the Fill Buffer.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
     {
         "EventCode": "0x5C",
         "Counter": "0,1,2,3",
         "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
-    {
-        "EventCode": "0x4E",
-        "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "HW_PRE_REQ.DL1_MISS",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Hardware Prefetch requests that miss the L1D cache. This accounts for both L1 streamer and IP-based (IPP) HW prefetchers. A request is being counted each time it access the cache & miss it, including if a block is applicable or if hit the Fill Buffer for .",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
     {
         "EventCode": "0x63",
         "Counter": "0,1,2,3",
index 34a519d9bfa045add6274c830b573ad3d2e4058d..b7150f65f16d640223c680b226e4ad220e84c5f0 100644 (file)
 [
     {
-        "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. ",
-        "EventCode": "0x00",
-        "Counter": "Fixed counter 1",
+        "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
+        "Counter": "Fixed counter 2",
+        "UMask": "0x3",
+        "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Reference cycles when the core is not in halt state.",
+        "CounterHTOff": "Fixed counter 2"
+    },
+    {
+        "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers.",
+        "Counter": "Fixed counter 0",
         "UMask": "0x1",
         "EventName": "INST_RETIRED.ANY",
         "SampleAfterValue": "2000003",
         "BriefDescription": "Instructions retired from execution.",
-        "CounterHTOff": "Fixed counter 1"
+        "CounterHTOff": "Fixed counter 0"
     },
     {
-        "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. ",
-        "EventCode": "0x00",
-        "Counter": "Fixed counter 2",
+        "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
+        "Counter": "Fixed counter 1",
         "UMask": "0x2",
         "EventName": "CPU_CLK_UNHALTED.THREAD",
         "SampleAfterValue": "2000003",
         "BriefDescription": "Core cycles when the thread is not in halt state.",
-        "CounterHTOff": "Fixed counter 2"
+        "CounterHTOff": "Fixed counter 1"
     },
     {
-        "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. ",
-        "EventCode": "0x00",
-        "Counter": "Fixed counter 3",
-        "UMask": "0x3",
-        "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+        "Counter": "Fixed counter 1",
+        "UMask": "0x2",
+        "AnyThread": "1",
+        "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Reference cycles when the core is not in halt state.",
-        "CounterHTOff": "Fixed counter 3"
+        "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+        "CounterHTOff": "Fixed counter 1"
     },
     {
-        "EventCode": "0x88",
+        "EventCode": "0x03",
         "Counter": "0,1,2,3",
-        "UMask": "0x41",
-        "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Not taken macro-conditional branches.",
+        "UMask": "0x1",
+        "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Loads delayed due to SB blocks, preceding store operations with known addresses but unknown data.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x88",
+        "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load.  The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store.  See the table of not supported store forwards in the Intel\u00ae 64 and IA-32 Architectures Optimization Reference Manual.  The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
+        "EventCode": "0x03",
         "Counter": "0,1,2,3",
-        "UMask": "0x81",
-        "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Taken speculative and retired macro-conditional branches.",
+        "UMask": "0x2",
+        "EventName": "LD_BLOCKS.STORE_FORWARD",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x88",
+        "EventCode": "0x03",
         "Counter": "0,1,2,3",
-        "UMask": "0x82",
-        "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
+        "UMask": "0x8",
+        "EventName": "LD_BLOCKS.NO_SR",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x88",
+        "EventCode": "0x03",
         "Counter": "0,1,2,3",
-        "UMask": "0x84",
-        "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
+        "UMask": "0x10",
+        "EventName": "LD_BLOCKS.ALL_BLOCK",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Number of cases where any load ends up with a valid block-code written to the load buffer (including blocks due to Memory Order Buffer (MOB), Data Cache Unit (DCU), TLB, but load has no DCU miss).",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x88",
+        "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K.  This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline.  The enhanced address check typically has a performance penalty of 5 cycles.",
+        "EventCode": "0x07",
         "Counter": "0,1,2,3",
-        "UMask": "0x88",
-        "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
+        "UMask": "0x1",
+        "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "False dependencies in MOB due to partial compare.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x88",
+        "EventCode": "0x07",
         "Counter": "0,1,2,3",
-        "UMask": "0x90",
-        "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Taken speculative and retired direct near calls.",
+        "UMask": "0x8",
+        "EventName": "LD_BLOCKS_PARTIAL.ALL_STA_BLOCK",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "This event counts the number of times that load operations are temporarily blocked because of older stores, with addresses that are not yet known. A load operation may incur more than one block of this type.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x88",
+        "EventCode": "0x0D",
         "Counter": "0,1,2,3",
-        "UMask": "0xa0",
-        "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Taken speculative and retired indirect calls.",
+        "UMask": "0x3",
+        "EventName": "INT_MISC.RECOVERY_CYCLES",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+        "CounterMask": "1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x88",
+        "EventCode": "0x0D",
         "Counter": "0,1,2,3",
-        "UMask": "0xc1",
-        "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Speculative and retired macro-conditional branches.",
+        "UMask": "0x3",
+        "EdgeDetect": "1",
+        "EventName": "INT_MISC.RECOVERY_STALLS_COUNT",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Number of occurrences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+        "CounterMask": "1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x88",
+        "EventCode": "0x0D",
         "Counter": "0,1,2,3",
-        "UMask": "0xc2",
-        "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
+        "UMask": "0x3",
+        "AnyThread": "1",
+        "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+        "CounterMask": "1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x88",
+        "EventCode": "0x0D",
         "Counter": "0,1,2,3",
-        "UMask": "0xc4",
-        "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
+        "UMask": "0x40",
+        "EventName": "INT_MISC.RAT_STALL_CYCLES",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x88",
+        "PublicDescription": "This event counts the number of Uops issued by the front-end of the pipeline to the back-end.",
+        "EventCode": "0x0E",
         "Counter": "0,1,2,3",
-        "UMask": "0xc8",
-        "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Speculative and retired indirect return branches.",
+        "UMask": "0x1",
+        "EventName": "UOPS_ISSUED.ANY",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS).",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x88",
+        "EventCode": "0x0E",
+        "Invert": "1",
         "Counter": "0,1,2,3",
-        "UMask": "0xd0",
-        "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Speculative and retired direct near calls.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "UMask": "0x1",
+        "EventName": "UOPS_ISSUED.STALL_CYCLES",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+        "CounterMask": "1",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0x89",
+        "EventCode": "0x0E",
+        "Invert": "1",
         "Counter": "0,1,2,3",
-        "UMask": "0x41",
-        "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "UMask": "0x1",
+        "AnyThread": "1",
+        "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+        "CounterMask": "1",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0x89",
+        "EventCode": "0x14",
         "Counter": "0,1,2,3",
-        "UMask": "0x81",
-        "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
+        "UMask": "0x1",
+        "EventName": "ARITH.FPU_DIV_ACTIVE",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Cycles when divider is busy executing divide operations.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x89",
+        "PublicDescription": "This event counts the number of the divide operations executed.",
+        "EventCode": "0x14",
         "Counter": "0,1,2,3",
-        "UMask": "0x84",
-        "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+        "UMask": "0x1",
+        "EdgeDetect": "1",
+        "EventName": "ARITH.FPU_DIV",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Divide operations executed.",
+        "CounterMask": "1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x89",
+        "EventCode": "0x3C",
         "Counter": "0,1,2,3",
-        "UMask": "0x88",
-        "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
+        "UMask": "0x0",
+        "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Thread cycles when thread is not in halt state.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x89",
+        "EventCode": "0x3C",
         "Counter": "0,1,2,3",
-        "UMask": "0x90",
-        "EventName": "BR_MISP_EXEC.TAKEN_DIRECT_NEAR_CALL",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Taken speculative and retired mispredicted direct near calls.",
+        "UMask": "0x0",
+        "AnyThread": "1",
+        "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x89",
+        "EventCode": "0x3C",
         "Counter": "0,1,2,3",
-        "UMask": "0xa0",
-        "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+        "UMask": "0x1",
+        "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x89",
+        "EventCode": "0x3C",
         "Counter": "0,1,2,3",
-        "UMask": "0xc1",
-        "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
+        "UMask": "0x1",
+        "AnyThread": "1",
+        "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Reference cycles when at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x89",
+        "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+        "EventCode": "0x3C",
         "Counter": "0,1,2,3",
-        "UMask": "0xc4",
-        "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
+        "UMask": "0x1",
+        "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x89",
+        "EventCode": "0x3C",
         "Counter": "0,1,2,3",
-        "UMask": "0xd0",
-        "EventName": "BR_MISP_EXEC.ALL_DIRECT_NEAR_CALL",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Speculative and retired mispredicted direct near calls.",
+        "UMask": "0x1",
+        "AnyThread": "1",
+        "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Reference cycles when at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x3C",
         "Counter": "0,1,2,3",
-        "UMask": "0x0",
-        "EventName": "CPU_CLK_UNHALTED.THREAD_P",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Thread cycles when thread is not in halt state.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0xA8",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "LSD.UOPS",
+        "UMask": "0x2",
+        "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Number of Uops delivered by the LSD.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0xA8",
+        "EventCode": "0x3C",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "LSD.CYCLES_ACTIVE",
+        "UMask": "0x2",
+        "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
-        "CounterMask": "1",
+        "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x87",
+        "EventCode": "0x4C",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "ILD_STALL.LCP",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+        "EventName": "LOAD_HIT_PRE.SW_PF",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x87",
+        "EventCode": "0x4C",
         "Counter": "0,1,2,3",
-        "UMask": "0x4",
-        "EventName": "ILD_STALL.IQ_FULL",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Stall cycles because IQ is full.",
+        "UMask": "0x2",
+        "EventName": "LOAD_HIT_PRE.HW_PF",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x0D",
+        "EventCode": "0x59",
         "Counter": "0,1,2,3",
-        "UMask": "0x40",
-        "EventName": "INT_MISC.RAT_STALL_CYCLES",
+        "UMask": "0x20",
+        "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread.",
+        "BriefDescription": "Increments the number of flags-merge uops in flight each cycle.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
+        "PublicDescription": "This event counts the number of cycles spent executing performance-sensitive flags-merging uops. For example, shift CL (merge_arith_flags). For more details, see the Intel\u00ae 64 and IA-32 Architectures Optimization Reference Manual.",
         "EventCode": "0x59",
         "Counter": "0,1,2,3",
         "UMask": "0x20",
-        "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP",
+        "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Increments the number of flags-merge uops in flight each cycle.",
+        "BriefDescription": "Performance sensitive flags-merging uops added by Sandy Bridge u-arch.",
+        "CounterMask": "1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts the number of cycles with at least one slow LEA uop being allocated. A uop is generally considered as slow LEA if it has three sources (for example, two sources and immediate) regardless of whether it is a result of LEA instruction or not. Examples of the slow LEA uop are or uops with base, index, and offset source operands using base and index reqisters, where base is EBR/RBP/R13, using RIP relative or 16-bit addressing modes. See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more details about slow LEA instructions.",
+        "PublicDescription": "This event counts the number of cycles with at least one slow LEA uop being allocated. A uop is generally considered as slow LEA if it has three sources (for example, two sources and immediate) regardless of whether it is a result of LEA instruction or not. Examples of the slow LEA uop are uops with base, index, and offset source operands using base and index registers, where base is EBP/RBP/R13, using RIP relative or 16-bit addressing modes. See the Intel\u00ae 64 and IA-32 Architectures Optimization Reference Manual for more details about slow LEA instructions.",
         "EventCode": "0x59",
         "Counter": "0,1,2,3",
         "UMask": "0x40",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xA2",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "RESOURCE_STALLS.ANY",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Resource-related stall cycles.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0xA2",
-        "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "RESOURCE_STALLS.LB",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Counts the cycles of stall due to lack of load buffers.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0xA2",
-        "Counter": "0,1,2,3",
-        "UMask": "0x4",
-        "EventName": "RESOURCE_STALLS.RS",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0xA2",
+        "EventCode": "0x5B",
         "Counter": "0,1,2,3",
-        "UMask": "0x8",
-        "EventName": "RESOURCE_STALLS.SB",
+        "UMask": "0xc",
+        "EventName": "RESOURCE_STALLS2.ALL_FL_EMPTY",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+        "BriefDescription": "Cycles when either free list is empty.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xA2",
+        "EventCode": "0x5B",
         "Counter": "0,1,2,3",
-        "UMask": "0x10",
-        "EventName": "RESOURCE_STALLS.ROB",
+        "UMask": "0xf",
+        "EventName": "RESOURCE_STALLS2.ALL_PRF_CONTROL",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles stalled due to re-order buffer full.",
+        "BriefDescription": "Resource stalls2 control structures full for physical registers.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts the number of Uops issued by the front-end of the pipeilne to the back-end.",
-        "EventCode": "0x0E",
+        "EventCode": "0x5B",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "UOPS_ISSUED.ANY",
+        "UMask": "0x4f",
+        "EventName": "RESOURCE_STALLS2.OOO_RSRC",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS).",
+        "BriefDescription": "Resource stalls out of order resources full.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x0E",
-        "Invert": "1",
+        "EventCode": "0x5E",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "UOPS_ISSUED.STALL_CYCLES",
+        "EventName": "RS_EVENTS.EMPTY_CYCLES",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
-        "CounterMask": "1",
-        "CounterHTOff": "0,1,2,3"
+        "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x0E",
+        "EventCode": "0x5E",
         "Invert": "1",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "AnyThread": "1",
-        "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+        "EdgeDetect": "1",
+        "EventName": "RS_EVENTS.EMPTY_END",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+        "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
         "CounterMask": "1",
-        "CounterHTOff": "0,1,2,3"
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x5E",
+        "EventCode": "0x87",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "RS_EVENTS.EMPTY_CYCLES",
+        "EventName": "ILD_STALL.LCP",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+        "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xCC",
+        "EventCode": "0x87",
         "Counter": "0,1,2,3",
-        "UMask": "0x20",
-        "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+        "UMask": "0x4",
+        "EventName": "ILD_STALL.IQ_FULL",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Count cases of saving new LBR.",
+        "BriefDescription": "Stall cycles because IQ is full.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear.  Machine clears can have a significant performance impact if they are happening frequently.",
-        "EventCode": "0xC3",
+        "EventCode": "0x88",
         "Counter": "0,1,2,3",
-        "UMask": "0x4",
-        "EventName": "MACHINE_CLEARS.SMC",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Self-modifying code (SMC) detected.",
+        "UMask": "0x41",
+        "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Not taken macro-conditional branches.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
-        "EventCode": "0xC3",
+        "EventCode": "0x88",
         "Counter": "0,1,2,3",
-        "UMask": "0x20",
-        "EventName": "MACHINE_CLEARS.MASKMOV",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+        "UMask": "0x81",
+        "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Taken speculative and retired macro-conditional branches.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xC0",
+        "EventCode": "0x88",
         "Counter": "0,1,2,3",
-        "UMask": "0x0",
-        "EventName": "INST_RETIRED.ANY_P",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Number of instructions retired. General Counter   - architectural event.",
+        "UMask": "0x82",
+        "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "PublicDescription": "This event counts the number of micro-ops retired.",
-        "EventCode": "0xC2",
+        "EventCode": "0x88",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "UOPS_RETIRED.ALL",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Actually retired uops.",
+        "UMask": "0x84",
+        "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "PublicDescription": "This event counts the number of retirement slots used each cycle.  There are potentially 4 slots that can be used each cycle - meaning, 4 micro-ops or 4 instructions could retire each cycle.  This event is used in determining the 'Retiring' category of the Top-Down pipeline slots characterization.",
-        "EventCode": "0xC2",
+        "EventCode": "0x88",
         "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Retirement slots used.",
+        "UMask": "0x88",
+        "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xC2",
-        "Invert": "1",
+        "EventCode": "0x88",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "UOPS_RETIRED.STALL_CYCLES",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles without actually retired uops.",
-        "CounterMask": "1",
-        "CounterHTOff": "0,1,2,3"
+        "UMask": "0x90",
+        "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Taken speculative and retired direct near calls.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xC2",
-        "Invert": "1",
+        "EventCode": "0x88",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles with less than 10 actually retired uops.",
-        "CounterMask": "10",
-        "CounterHTOff": "0,1,2,3"
+        "UMask": "0xa0",
+        "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Taken speculative and retired indirect calls.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "EventCode": "0xC4",
+        "EventCode": "0x88",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "BR_INST_RETIRED.CONDITIONAL",
-        "SampleAfterValue": "400009",
-        "BriefDescription": "Conditional branch instructions retired.",
+        "UMask": "0xc1",
+        "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Speculative and retired macro-conditional branches.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "EventCode": "0xC4",
+        "EventCode": "0x88",
         "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "BR_INST_RETIRED.NEAR_CALL",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Direct and indirect near call instructions retired.",
+        "UMask": "0xc2",
+        "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xC4",
+        "EventCode": "0x88",
         "Counter": "0,1,2,3",
-        "UMask": "0x0",
-        "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
-        "SampleAfterValue": "400009",
-        "BriefDescription": "All (macro) branch instructions retired.",
+        "UMask": "0xc4",
+        "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "EventCode": "0xC4",
+        "EventCode": "0x88",
         "Counter": "0,1,2,3",
-        "UMask": "0x8",
-        "EventName": "BR_INST_RETIRED.NEAR_RETURN",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Return instructions retired.",
+        "UMask": "0xc8",
+        "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Speculative and retired indirect return branches.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xC4",
+        "EventCode": "0x88",
         "Counter": "0,1,2,3",
-        "UMask": "0x10",
-        "EventName": "BR_INST_RETIRED.NOT_TAKEN",
-        "SampleAfterValue": "400009",
-        "BriefDescription": "Not taken branch instructions retired.",
+        "UMask": "0xd0",
+        "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Speculative and retired direct near calls.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "EventCode": "0xC4",
+        "EventCode": "0x88",
         "Counter": "0,1,2,3",
-        "UMask": "0x20",
-        "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
-        "SampleAfterValue": "400009",
-        "BriefDescription": "Taken branch instructions retired.",
+        "UMask": "0xff",
+        "EventName": "BR_INST_EXEC.ALL_BRANCHES",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Speculative and retired  branches.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xC4",
+        "EventCode": "0x89",
         "Counter": "0,1,2,3",
-        "UMask": "0x40",
-        "EventName": "BR_INST_RETIRED.FAR_BRANCH",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Far branch instructions retired.",
+        "UMask": "0x41",
+        "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "2",
-        "EventCode": "0xC4",
+        "EventCode": "0x89",
         "Counter": "0,1,2,3",
-        "UMask": "0x4",
-        "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
-        "SampleAfterValue": "400009",
-        "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS).",
-        "CounterHTOff": "0,1,2,3"
+        "UMask": "0x81",
+        "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "EventCode": "0xC5",
+        "EventCode": "0x89",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "BR_MISP_RETIRED.CONDITIONAL",
-        "SampleAfterValue": "400009",
-        "BriefDescription": "Mispredicted conditional branch instructions retired.",
+        "UMask": "0x84",
+        "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "EventCode": "0xC5",
+        "EventCode": "0x89",
         "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "BR_MISP_RETIRED.NEAR_CALL",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Direct and indirect mispredicted near call instructions retired.",
+        "UMask": "0x88",
+        "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xC5",
+        "EventCode": "0x89",
         "Counter": "0,1,2,3",
-        "UMask": "0x0",
-        "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
-        "SampleAfterValue": "400009",
-        "BriefDescription": "All mispredicted macro branch instructions retired.",
+        "UMask": "0x90",
+        "EventName": "BR_MISP_EXEC.TAKEN_DIRECT_NEAR_CALL",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Taken speculative and retired mispredicted direct near calls.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "EventCode": "0xC5",
+        "EventCode": "0x89",
         "Counter": "0,1,2,3",
-        "UMask": "0x10",
-        "EventName": "BR_MISP_RETIRED.NOT_TAKEN",
-        "SampleAfterValue": "400009",
-        "BriefDescription": "Mispredicted not taken branch instructions retired.",
+        "UMask": "0xa0",
+        "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "1",
-        "EventCode": "0xC5",
+        "EventCode": "0x89",
         "Counter": "0,1,2,3",
-        "UMask": "0x20",
-        "EventName": "BR_MISP_RETIRED.TAKEN",
-        "SampleAfterValue": "400009",
-        "BriefDescription": "Mispredicted taken branch instructions retired.",
+        "UMask": "0xc1",
+        "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PEBS": "2",
-        "PublicDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
-        "EventCode": "0xC5",
+        "EventCode": "0x89",
         "Counter": "0,1,2,3",
-        "UMask": "0x4",
-        "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
-        "SampleAfterValue": "400009",
-        "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS).",
-        "CounterHTOff": "0,1,2,3"
+        "UMask": "0xc4",
+        "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xC1",
+        "EventCode": "0x89",
         "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "OTHER_ASSISTS.ITLB_MISS_RETIRED",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Retired instructions experiencing ITLB misses.",
+        "UMask": "0xd0",
+        "EventName": "BR_MISP_EXEC.ALL_DIRECT_NEAR_CALL",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Speculative and retired mispredicted direct near calls.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x14",
+        "EventCode": "0x89",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "ARITH.FPU_DIV_ACTIVE",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles when divider is busy executing divide operations.",
+        "UMask": "0xff",
+        "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts the number of the divide operations executed.",
-        "EventCode": "0x14",
+        "EventCode": "0xA1",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EdgeDetect": "1",
-        "EventName": "ARITH.FPU_DIV",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Divide operations executed.",
-        "CounterMask": "1",
+        "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Cycles per thread when uops are dispatched to port 0.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xB1",
+        "EventCode": "0xA1",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "UOPS_DISPATCHED.THREAD",
+        "AnyThread": "1",
+        "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Uops dispatched per thread.",
+        "BriefDescription": "Cycles per core when uops are dispatched to port 0.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xB1",
+        "EventCode": "0xA1",
         "Counter": "0,1,2,3",
         "UMask": "0x2",
-        "EventName": "UOPS_DISPATCHED.CORE",
+        "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Uops dispatched from any thread.",
+        "BriefDescription": "Cycles per thread when uops are dispatched to port 1.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xA1",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+        "UMask": "0x2",
+        "AnyThread": "1",
+        "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles per thread when uops are dispatched to port 0.",
+        "BriefDescription": "Cycles per core when uops are dispatched to port 1.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xA1",
         "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+        "UMask": "0xc",
+        "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles per thread when uops are dispatched to port 1.",
+        "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xA1",
         "Counter": "0,1,2,3",
-        "UMask": "0x40",
-        "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+        "UMask": "0xc",
+        "AnyThread": "1",
+        "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles per thread when uops are dispatched to port 4.",
+        "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 2.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xA1",
         "Counter": "0,1,2,3",
-        "UMask": "0x80",
-        "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+        "UMask": "0x30",
+        "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles per thread when uops are dispatched to port 5.",
+        "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xA3",
+        "EventCode": "0xA1",
         "Counter": "0,1,2,3",
-        "UMask": "0x4",
-        "EventName": "CYCLE_ACTIVITY.CYCLES_NO_DISPATCH",
+        "UMask": "0x30",
+        "AnyThread": "1",
+        "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Each cycle there was no dispatch for this thread, increment by 1. Note this is connect to Umask 2. No dispatch can be deduced from the UOPS_EXECUTED event.",
-        "CounterMask": "4",
-        "CounterHTOff": "0,1,2,3"
+        "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xA3",
-        "Counter": "2",
-        "UMask": "0x2",
-        "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+        "EventCode": "0xA1",
+        "Counter": "0,1,2,3",
+        "UMask": "0x40",
+        "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Each cycle there was a miss-pending demand load this thread, increment by 1. Note this is in DCU and connected to Umask 1. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
-        "CounterMask": "2",
-        "CounterHTOff": "2"
+        "BriefDescription": "Cycles per thread when uops are dispatched to port 4.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xA3",
+        "EventCode": "0xA1",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+        "UMask": "0x40",
+        "AnyThread": "1",
+        "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Each cycle there was a MLC-miss pending demand load this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0.",
-        "CounterMask": "1",
+        "BriefDescription": "Cycles per core when uops are dispatched to port 4.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xA3",
-        "Counter": "2",
-        "UMask": "0x6",
-        "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+        "EventCode": "0xA1",
+        "Counter": "0,1,2,3",
+        "UMask": "0x80",
+        "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Each cycle there was a miss-pending demand load this thread and no uops dispatched, increment by 1. Note this is in DCU and connected to Umask 1 and 2. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
-        "CounterMask": "6",
-        "CounterHTOff": "2"
+        "BriefDescription": "Cycles per thread when uops are dispatched to port 5.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xA3",
+        "EventCode": "0xA1",
         "Counter": "0,1,2,3",
-        "UMask": "0x5",
-        "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+        "UMask": "0x80",
+        "AnyThread": "1",
+        "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Each cycle there was a MLC-miss pending demand load and no uops dispatched on this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0 and 2.",
-        "CounterMask": "5",
-        "CounterHTOff": "0,1,2,3"
+        "BriefDescription": "Cycles per core when uops are dispatched to port 5.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x4C",
+        "EventCode": "0xA2",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "LOAD_HIT_PRE.SW_PF",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch.",
+        "EventName": "RESOURCE_STALLS.ANY",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Resource-related stall cycles.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x4C",
+        "EventCode": "0xA2",
         "Counter": "0,1,2,3",
         "UMask": "0x2",
-        "EventName": "LOAD_HIT_PRE.HW_PF",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch.",
+        "EventName": "RESOURCE_STALLS.LB",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Counts the cycles of stall due to lack of load buffers.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x03",
+        "EventCode": "0xA2",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "LD_BLOCKS.DATA_UNKNOWN",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Loads delayed due to SB blocks, preceding store operations with known addresses but unknown data.",
+        "UMask": "0x4",
+        "EventName": "RESOURCE_STALLS.RS",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load.  The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store.  See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual.  The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
-        "EventCode": "0x03",
+        "EventCode": "0xA2",
         "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "LD_BLOCKS.STORE_FORWARD",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding.",
+        "UMask": "0x8",
+        "EventName": "RESOURCE_STALLS.SB",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x03",
+        "EventCode": "0xA2",
         "Counter": "0,1,2,3",
-        "UMask": "0x8",
-        "EventName": "LD_BLOCKS.NO_SR",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+        "UMask": "0xa",
+        "EventName": "RESOURCE_STALLS.LB_SB",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Resource stalls due to load or store buffers all being in use.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x03",
+        "EventCode": "0xA2",
+        "Counter": "0,1,2,3",
+        "UMask": "0xe",
+        "EventName": "RESOURCE_STALLS.MEM_RS",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Resource stalls due to memory buffers or Reservation Station (RS) being fully utilized.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
+    {
+        "EventCode": "0xA2",
         "Counter": "0,1,2,3",
         "UMask": "0x10",
-        "EventName": "LD_BLOCKS.ALL_BLOCK",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Number of cases where any load ends up with a valid block-code written to the load buffer (including blocks due to Memory Order Buffer (MOB), Data Cache Unit (DCU), TLB, but load has no DCU miss).",
+        "EventName": "RESOURCE_STALLS.ROB",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Cycles stalled due to re-order buffer full.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K.  This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline.  The enhanced address check typically has a performance penalty of 5 cycles.",
-        "EventCode": "0x07",
+        "EventCode": "0xA2",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "False dependencies in MOB due to partial compare.",
+        "UMask": "0xf0",
+        "EventName": "RESOURCE_STALLS.OOO_RSRC",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Resource stalls due to Rob being full, FCSW, MXCSR and OTHER.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x07",
+        "EventCode": "0xA3",
         "Counter": "0,1,2,3",
-        "UMask": "0x8",
-        "EventName": "LD_BLOCKS_PARTIAL.ALL_STA_BLOCK",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "This event counts the number of times that load operations are temporarily blocked because of older stores, with addresses that are not yet known. A load operation may incur more than one block of this type.",
+        "UMask": "0x1",
+        "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Each cycle there was a MLC-miss pending demand load this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0.",
+        "CounterMask": "1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xB6",
+        "EventCode": "0xA3",
+        "Counter": "2",
+        "UMask": "0x2",
+        "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Each cycle there was a miss-pending demand load this thread, increment by 1. Note this is in DCU and connected to Umask 1. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
+        "CounterMask": "2",
+        "CounterHTOff": "2"
+    },
+    {
+        "EventCode": "0xA3",
+        "Counter": "0,1,2,3",
+        "UMask": "0x4",
+        "EventName": "CYCLE_ACTIVITY.CYCLES_NO_DISPATCH",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Each cycle there was no dispatch for this thread, increment by 1. Note this is connect to Umask 2. No dispatch can be deduced from the UOPS_EXECUTED event.",
+        "CounterMask": "4",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "EventCode": "0xA3",
+        "Counter": "0,1,2,3",
+        "UMask": "0x5",
+        "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Each cycle there was a MLC-miss pending demand load and no uops dispatched on this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0 and 2.",
+        "CounterMask": "5",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "EventCode": "0xA3",
+        "Counter": "2",
+        "UMask": "0x6",
+        "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Each cycle there was a miss-pending demand load this thread and no uops dispatched, increment by 1. Note this is in DCU and connected to Umask 1 and 2. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
+        "CounterMask": "6",
+        "CounterHTOff": "2"
+    },
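The CYCLE_ACTIVITY entries above all carry a CounterMask: with a non-zero CMASK the counter increments once per cycle in which the per-cycle event count meets the threshold, turning an occupancy event into a cycle count. A small sketch of how one of them translates to a raw perf event, assuming the standard CMASK position in bits 24-31:

```python
# Sketch: a CounterMask (CMASK) event counts cycles, not occurrences --
# the PMU compares the per-cycle event count against CMASK and increments
# by 1 when the threshold is met. Encoding CYCLE_ACTIVITY.CYCLES_L2_PENDING
# (EventCode 0xA3, UMask 0x1, CounterMask 1) as a raw perf config:
event, umask, cmask = 0xA3, 0x1, 1
config = event | (umask << 8) | (cmask << 24)   # CMASK lives in bits 24-31
print(f"perf stat -e r{config:x}")              # -> perf stat -e r10001a3
```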
+    {
+        "EventCode": "0xA8",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "AGU_BYPASS_CANCEL.COUNT",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "This event counts executed load operations with all the following traits: 1. addressing of the format [base + offset], 2. the offset is between 1 and 2047, 3. the address specified in the base register is in one page and the address [base+offset] is in an.",
+        "EventName": "LSD.UOPS",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Number of Uops delivered by the LSD.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x3C",
+        "EventCode": "0xA8",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+        "EventName": "LSD.CYCLES_ACTIVE",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+        "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+        "CounterMask": "1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x3C",
+        "EventCode": "0xA8",
         "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+        "UMask": "0x1",
+        "EventName": "LSD.CYCLES_4_UOPS",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
-        "CounterHTOff": "0,1,2,3"
+        "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+        "CounterMask": "4",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xA1",
+        "EventCode": "0xB1",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "AnyThread": "1",
-        "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
+        "EventName": "UOPS_DISPATCHED.THREAD",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles per core when uops are dispatched to port 0.",
+        "BriefDescription": "Uops dispatched per thread.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xA1",
+        "EventCode": "0xB1",
         "Counter": "0,1,2,3",
         "UMask": "0x2",
-        "AnyThread": "1",
-        "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
+        "EventName": "UOPS_DISPATCHED.CORE",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles per core when uops are dispatched to port 1.",
+        "BriefDescription": "Uops dispatched from any thread.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xA1",
+        "EventCode": "0xB1",
         "Counter": "0,1,2,3",
-        "UMask": "0x40",
-        "AnyThread": "1",
-        "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
+        "UMask": "0x2",
+        "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles per core when uops are dispatched to port 4.",
+        "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+        "CounterMask": "1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xA1",
+        "EventCode": "0xB1",
         "Counter": "0,1,2,3",
-        "UMask": "0x80",
-        "AnyThread": "1",
-        "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+        "UMask": "0x2",
+        "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles per core when uops are dispatched to port 5.",
+        "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+        "CounterMask": "2",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xA1",
+        "EventCode": "0xB1",
         "Counter": "0,1,2,3",
-        "UMask": "0xc",
-        "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+        "UMask": "0x2",
+        "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2.",
+        "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+        "CounterMask": "3",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xA1",
+        "EventCode": "0xB1",
         "Counter": "0,1,2,3",
-        "UMask": "0x30",
-        "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+        "UMask": "0x2",
+        "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3.",
+        "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+        "CounterMask": "4",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xA1",
+        "EventCode": "0xB1",
+        "Invert": "1",
         "Counter": "0,1,2,3",
-        "UMask": "0xc",
-        "AnyThread": "1",
-        "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
+        "UMask": "0x2",
+        "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 2.",
+        "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xA1",
+        "EventCode": "0xB6",
         "Counter": "0,1,2,3",
-        "UMask": "0x30",
-        "AnyThread": "1",
-        "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
+        "UMask": "0x1",
+        "EventName": "AGU_BYPASS_CANCEL.COUNT",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "This event counts executed load operations with all the following traits: 1. addressing of the format [base + offset], 2. the offset is between 1 and 2047, 3. the address specified in the base register is in one page and the address [base+offset] is in an.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
+    {
+        "EventCode": "0xC0",
+        "Counter": "0,1,2,3",
+        "UMask": "0x0",
+        "EventName": "INST_RETIRED.ANY_P",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
+        "BriefDescription": "Number of instructions retired. General Counter   - architectural event.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "CounterHTOff": "1"
     },
     {
-        "EventCode": "0x5B",
+        "EventCode": "0xC1",
         "Counter": "0,1,2,3",
-        "UMask": "0xf",
-        "EventName": "RESOURCE_STALLS2.ALL_PRF_CONTROL",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Resource stalls2 control structures full for physical registers.",
+        "UMask": "0x2",
+        "EventName": "OTHER_ASSISTS.ITLB_MISS_RETIRED",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Retired instructions experiencing ITLB misses.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x5B",
+        "PEBS": "1",
+        "PublicDescription": "This event counts the number of micro-ops retired. (Precise Event)",
+        "EventCode": "0xC2",
         "Counter": "0,1,2,3",
-        "UMask": "0xc",
-        "EventName": "RESOURCE_STALLS2.ALL_FL_EMPTY",
+        "UMask": "0x1",
+        "EventName": "UOPS_RETIRED.ALL",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles with either free list is empty.",
+        "BriefDescription": "Actually retired uops. (Precise Event - PEBS).",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xA2",
+        "EventCode": "0xC2",
+        "Invert": "1",
         "Counter": "0,1,2,3",
-        "UMask": "0xe",
-        "EventName": "RESOURCE_STALLS.MEM_RS",
+        "UMask": "0x1",
+        "EventName": "UOPS_RETIRED.STALL_CYCLES",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Resource stalls due to memory buffers or Reservation Station (RS) being fully utilized.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "BriefDescription": "Cycles without actually retired uops.",
+        "CounterMask": "1",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0xA2",
+        "EventCode": "0xC2",
+        "Invert": "1",
         "Counter": "0,1,2,3",
-        "UMask": "0xf0",
-        "EventName": "RESOURCE_STALLS.OOO_RSRC",
+        "UMask": "0x1",
+        "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Resource stalls due to Rob being full, FCSW, MXCSR and OTHER.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0x5B",
-        "Counter": "0,1,2,3",
-        "UMask": "0x4f",
-        "EventName": "RESOURCE_STALLS2.OOO_RSRC",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Resource stalls out of order resources full.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0xA2",
-        "Counter": "0,1,2,3",
-        "UMask": "0xa",
-        "EventName": "RESOURCE_STALLS.LB_SB",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Resource stalls due to load or store buffers all being in use.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "BriefDescription": "Cycles with less than 10 actually retired uops.",
+        "CounterMask": "10",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0x0D",
+        "EventCode": "0xC2",
+        "Invert": "1",
         "Counter": "0,1,2,3",
-        "UMask": "0x3",
-        "EventName": "INT_MISC.RECOVERY_CYCLES",
+        "UMask": "0x1",
+        "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+        "BriefDescription": "Cycles without actually retired uops.",
         "CounterMask": "1",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "This event counts the number of cycles spent executing performance-sensitive flags-merging uops. For example, shift CL (merge_arith_flags). For more details, See the Intel? 64 and IA-32 Architectures Optimization Reference Manual.",
-        "EventCode": "0x59",
+        "PEBS": "1",
+        "PublicDescription": "This event counts the number of retirement slots used each cycle.  There are potentially 4 slots that can be used each cycle - meaning, 4 micro-ops or 4 instructions could retire each cycle.  This event is used in determining the 'Retiring' category of the Top-Down pipeline slots characterization. (Precise Event - PEBS)",
+        "EventCode": "0xC2",
         "Counter": "0,1,2,3",
-        "UMask": "0x20",
-        "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES",
+        "UMask": "0x2",
+        "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Performance sensitive flags-merging uops added by Sandy Bridge u-arch.",
-        "CounterMask": "1",
+        "BriefDescription": "Retirement slots used. (Precise Event - PEBS).",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x0D",
+        "EventCode": "0xc3",
         "Counter": "0,1,2,3",
-        "UMask": "0x3",
+        "UMask": "0x1",
         "EdgeDetect": "1",
-        "EventName": "INT_MISC.RECOVERY_STALLS_COUNT",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+        "EventName": "MACHINE_CLEARS.COUNT",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Number of machine clears (nukes) of any type.",
         "CounterMask": "1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xE6",
+        "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear.  Machine clears can have a significant performance impact if they are happening frequently.",
+        "EventCode": "0xC3",
         "Counter": "0,1,2,3",
-        "UMask": "0x1f",
-        "EventName": "BACLEARS.ANY",
+        "UMask": "0x4",
+        "EventName": "MACHINE_CLEARS.SMC",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+        "BriefDescription": "Self-modifying code (SMC) detected.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x88",
+        "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
+        "EventCode": "0xC3",
         "Counter": "0,1,2,3",
-        "UMask": "0xff",
-        "EventName": "BR_INST_EXEC.ALL_BRANCHES",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Speculative and retired  branches.",
+        "UMask": "0x20",
+        "EventName": "MACHINE_CLEARS.MASKMOV",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x89",
+        "EventCode": "0xC4",
         "Counter": "0,1,2,3",
-        "UMask": "0xff",
-        "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
-        "SampleAfterValue": "200003",
-        "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
+        "UMask": "0x0",
+        "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+        "SampleAfterValue": "400009",
+        "BriefDescription": "All (macro) branch instructions retired.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xC2",
-        "Invert": "1",
+        "PEBS": "1",
+        "EventCode": "0xC4",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles without actually retired uops.",
-        "CounterMask": "1",
-        "CounterHTOff": "0,1,2,3"
+        "EventName": "BR_INST_RETIRED.CONDITIONAL",
+        "SampleAfterValue": "400009",
+        "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS).",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xA8",
+        "PEBS": "1",
+        "EventCode": "0xC4",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "LSD.CYCLES_4_UOPS",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
-        "CounterMask": "4",
+        "UMask": "0x2",
+        "EventName": "BR_INST_RETIRED.NEAR_CALL",
+        "SampleAfterValue": "100007",
+        "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS).",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xc3",
+        "PEBS": "1",
+        "EventCode": "0xC4",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EdgeDetect": "1",
-        "EventName": "MACHINE_CLEARS.COUNT",
-        "SampleAfterValue": "100003",
-        "BriefDescription": "Number of machine clears (nukes) of any type.",
-        "CounterMask": "1",
+        "UMask": "0x2",
+        "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+        "SampleAfterValue": "100007",
+        "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). (Precise Event - PEBS).",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x5E",
-        "Invert": "1",
+        "PEBS": "2",
+        "EventCode": "0xC4",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EdgeDetect": "1",
-        "EventName": "RS_EVENTS.EMPTY_END",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
-        "CounterMask": "1",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "UMask": "0x4",
+        "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+        "SampleAfterValue": "400009",
+        "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS).",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0x00",
-        "Counter": "Fixed counter 2",
-        "UMask": "0x2",
-        "AnyThread": "1",
-        "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
-        "CounterHTOff": "Fixed counter 2"
+        "PEBS": "1",
+        "EventCode": "0xC4",
+        "Counter": "0,1,2,3",
+        "UMask": "0x8",
+        "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+        "SampleAfterValue": "100007",
+        "BriefDescription": "Return instructions retired. (Precise Event - PEBS).",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x3C",
+        "EventCode": "0xC4",
         "Counter": "0,1,2,3",
-        "UMask": "0x0",
-        "AnyThread": "1",
-        "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+        "UMask": "0x10",
+        "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+        "SampleAfterValue": "400009",
+        "BriefDescription": "Not taken branch instructions retired.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x3C",
+        "PEBS": "1",
+        "EventCode": "0xC4",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "AnyThread": "1",
-        "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+        "UMask": "0x20",
+        "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+        "SampleAfterValue": "400009",
+        "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS).",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x0D",
+        "EventCode": "0xC4",
         "Counter": "0,1,2,3",
-        "UMask": "0x3",
-        "AnyThread": "1",
-        "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
-        "CounterMask": "1",
+        "UMask": "0x40",
+        "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+        "SampleAfterValue": "100007",
+        "BriefDescription": "Far branch instructions retired.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xB1",
+        "EventCode": "0xC5",
         "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
-        "CounterMask": "1",
+        "UMask": "0x0",
+        "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+        "SampleAfterValue": "400009",
+        "BriefDescription": "All mispredicted macro branch instructions retired.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xB1",
+        "PEBS": "1",
+        "EventCode": "0xC5",
         "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
-        "CounterMask": "2",
+        "UMask": "0x1",
+        "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+        "SampleAfterValue": "400009",
+        "BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS).",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xB1",
+        "PEBS": "1",
+        "EventCode": "0xC5",
         "Counter": "0,1,2,3",
         "UMask": "0x2",
-        "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
-        "CounterMask": "3",
+        "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+        "SampleAfterValue": "100007",
+        "BriefDescription": "Direct and indirect mispredicted near call instructions retired. (Precise Event - PEBS).",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xB1",
+        "PEBS": "2",
+        "PublicDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
+        "EventCode": "0xC5",
         "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
-        "CounterMask": "4",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
+        "UMask": "0x4",
+        "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+        "SampleAfterValue": "400009",
+        "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS).",
+        "CounterHTOff": "0,1,2,3"
     },
     {
-        "EventCode": "0xB1",
-        "Invert": "1",
+        "PEBS": "1",
+        "EventCode": "0xC5",
         "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+        "UMask": "0x10",
+        "EventName": "BR_MISP_RETIRED.NOT_TAKEN",
+        "SampleAfterValue": "400009",
+        "BriefDescription": "Mispredicted not taken branch instructions retired.(Precise Event - PEBS).",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
-        "EventCode": "0x3C",
+        "PEBS": "1",
+        "EventCode": "0xC5",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+        "UMask": "0x20",
+        "EventName": "BR_MISP_RETIRED.TAKEN",
+        "SampleAfterValue": "400009",
+        "BriefDescription": "Mispredicted taken branch instructions retired. (Precise Event - PEBS).",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
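A typical use of the retired-branch pairs above is a misprediction-rate ratio. A minimal sketch, assuming the counts have already been collected (e.g. with perf stat; the numbers below are placeholders, not measurements):

```python
# Sketch: derive a branch misprediction rate from the two retired-branch
# counters defined above. The raw counts are assumed to come from a tool
# such as `perf stat`; the variable names and values are illustrative.
br_retired = 1_250_000      # BR_INST_RETIRED.ALL_BRANCHES (example value)
br_mispredicted = 31_000    # BR_MISP_RETIRED.ALL_BRANCHES (example value)

misp_rate = br_mispredicted / br_retired
print(f"branch misprediction rate: {misp_rate:.2%}")
```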
     {
-        "EventCode": "0x3C",
+        "EventCode": "0xCC",
         "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "AnyThread": "1",
-        "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+        "UMask": "0x20",
+        "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+        "BriefDescription": "Count cases of saving new LBR.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x3C",
+        "EventCode": "0xE6",
         "Counter": "0,1,2,3",
-        "UMask": "0x2",
-        "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+        "UMask": "0x1f",
+        "EventName": "BACLEARS.ANY",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     }
 ]
\ No newline at end of file
index fd7d7c438226b9d5cbdc1878e1306cc8fddc68b8..cfeba5067bab5404019d8ae868035dd2759c28fd 100644 (file)
 [
     {
-        "BriefDescription": "Instructions Per Cycle (per logical thread)",
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Frontend_Bound"
+    },
+    {
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Frontend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Bad_Speculation"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Bad_Speculation_SMT"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Backend_Bound"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Backend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Retiring"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Retiring_SMT"
+    },
+    {
         "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Instructions Per Cycle (per logical thread)",
         "MetricGroup": "TopDownL1",
         "MetricName": "IPC"
     },
     {
-        "BriefDescription": "Uops Per Instruction",
         "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
-        "MetricGroup": "Pipeline",
+        "BriefDescription": "Uops Per Instruction",
+        "MetricGroup": "Pipeline;Retiring",
         "MetricName": "UPI"
     },
     {
-        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
-        "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
-        "MetricGroup": "Frontend",
+        "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
+        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+        "MetricGroup": "PGO",
         "MetricName": "IFetch_Line_Utilization"
     },
     {
-        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
-        "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
-        "MetricGroup": "DSB; Frontend_Bandwidth",
+        "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+        "MetricGroup": "DSB;Frontend_Bandwidth",
         "MetricName": "DSB_Coverage"
     },
     {
-        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricGroup": "Pipeline;Summary",
         "MetricName": "CPI"
     },
     {
-        "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
         "MetricGroup": "Summary",
         "MetricName": "CLKS"
     },
     {
-        "BriefDescription": "Total issue-pipeline slots",
-        "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+        "MetricExpr": "4 * cycles",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
         "MetricGroup": "TopDownL1",
         "MetricName": "SLOTS"
     },
     {
-        "BriefDescription": "Total number of retired Instructions",
+        "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
+        "MetricGroup": "TopDownL1_SMT",
+        "MetricName": "SLOTS_SMT"
+    },
+    {
         "MetricExpr": "INST_RETIRED.ANY",
+        "BriefDescription": "Total number of retired Instructions",
         "MetricGroup": "Summary",
         "MetricName": "Instructions"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / cycles",
         "BriefDescription": "Instructions Per Cycle (per physical core)",
-        "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
         "MetricGroup": "SMT",
         "MetricName": "CoreIPC"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Instructions Per Cycle (per physical core)",
+        "MetricGroup": "SMT",
+        "MetricName": "CoreIPC_SMT"
+    },
+    {
+        "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
+        "BriefDescription": "Floating Point Operations Per Cycle",
+        "MetricGroup": "FLOPS",
+        "MetricName": "FLOPc"
+    },
+    {
+        "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Floating Point Operations Per Cycle",
+        "MetricGroup": "FLOPS_SMT",
+        "MetricName": "FLOPc_SMT"
+    },
+    {
+        "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
         "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
-        "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
         "MetricGroup": "Pipeline;Ports_Utilization",
         "MetricName": "ILP"
     },
     {
+        "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
         "BriefDescription": "Core actual clocks when any thread is active on the physical core",
-        "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
         "MetricGroup": "SMT",
         "MetricName": "CORE_CLKS"
     },
     {
-        "BriefDescription": "Average CPU Utilization",
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+        "BriefDescription": "Average CPU Utilization",
         "MetricGroup": "Summary",
         "MetricName": "CPU_Utilization"
     },
     {
+        "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
         "BriefDescription": "Giga Floating Point Operations Per Second",
-        "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
         "MetricGroup": "FLOPS;Summary",
         "MetricName": "GFLOPs"
     },
     {
-        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricGroup": "Power",
         "MetricName": "Turbo_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricGroup": "SMT;Summary",
         "MetricName": "SMT_2T_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
     {
-        "BriefDescription": "C3 residency percent per core",
+        "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
+        "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "DRAM_BW_Use"
+    },
+    {
         "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per core",
         "MetricName": "C3_Core_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per core",
         "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per core",
         "MetricName": "C6_Core_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per core",
         "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per core",
         "MetricName": "C7_Core_Residency"
     },
     {
-        "BriefDescription": "C2 residency percent per package",
         "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C2 residency percent per package",
         "MetricName": "C2_Pkg_Residency"
     },
     {
-        "BriefDescription": "C3 residency percent per package",
         "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per package",
         "MetricName": "C3_Pkg_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per package",
         "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per package",
         "MetricName": "C6_Pkg_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per package",
         "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per package",
         "MetricName": "C7_Pkg_Residency"
     }
 ]
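
Each "MetricExpr" above is an arithmetic formula over raw PMU event counts ("cycles" stands for CPU_CLK_UNHALTED.THREAD and "duration_time" for wall-clock seconds) that perf evaluates when the metric is requested. As a minimal sketch of how the level-1 top-down buckets and DRAM_BW_Use fit together, here the terms of the Backend_Bound expression are evaluated by hand; all counts and the 1.0 s duration are hypothetical, and this mirrors the expressions above rather than perf's actual metric evaluator.

    # Minimal sketch: evaluating the non-SMT TopdownL1 expressions by hand.
    # All event counts below are hypothetical, not measured data.
    counts = {
        "CPU_CLK_UNHALTED.THREAD": 1_000_000,   # "cycles" in the expressions
        "IDQ_UOPS_NOT_DELIVERED.CORE": 600_000,
        "UOPS_ISSUED.ANY": 2_700_000,
        "UOPS_RETIRED.RETIRE_SLOTS": 2_400_000,
        "INT_MISC.RECOVERY_CYCLES": 25_000,
    }

    cycles = counts["CPU_CLK_UNHALTED.THREAD"]
    slots = 4 * cycles                          # SLOTS: 4 issue slots per cycle

    frontend_bound = counts["IDQ_UOPS_NOT_DELIVERED.CORE"] / slots
    bad_speculation = (counts["UOPS_ISSUED.ANY"]
                       - counts["UOPS_RETIRED.RETIRE_SLOTS"]
                       + 4 * counts["INT_MISC.RECOVERY_CYCLES"]) / slots
    retiring = counts["UOPS_RETIRED.RETIRE_SLOTS"] / slots
    # Backend_Bound is defined as whatever the other three buckets leave over.
    backend_bound = 1 - (frontend_bound + bad_speculation + retiring)

    for name, value in [("Frontend_Bound", frontend_bound),
                        ("Bad_Speculation", bad_speculation),
                        ("Retiring", retiring),
                        ("Backend_Bound", backend_bound)]:
        print(f"{name:16s} {value:6.1%}")

    # DRAM_BW_Use: 64 bytes per ARB read/write request, scaled to GB/s.
    arb_reads, arb_writes, duration_time = 5_000_000, 2_000_000, 1.0
    dram_bw_gbs = 64 * (arb_reads + arb_writes) / 1e9 / duration_time
    print(f"DRAM_BW_Use      {dram_bw_gbs:6.2f} GB/s")

The three explicit buckets plus Backend_Bound sum to 1 by construction, which is exactly what the "1 - (...)" form of the Backend_Bound expression encodes.
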
index a654ab771fce7a5a245bdd62e6b7994b4b8efce0..b8eccce5d75d4a21fbbb4b56bf8908c745a3b1ef 100644 (file)
 [
     {
-        "EventCode": "0xAE",
-        "Counter": "0,1,2,3",
-        "UMask": "0x1",
-        "EventName": "ITLB.ITLB_FLUSH",
-        "SampleAfterValue": "100007",
-        "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0x4F",
-        "Counter": "0,1,2,3",
-        "UMask": "0x10",
-        "EventName": "EPT.WALK_CYCLES",
-        "SampleAfterValue": "2000003",
-        "BriefDescription": "Cycle count for an Extended Page table walk.  The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0x85",
+        "EventCode": "0x08",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+        "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Misses at all ITLB levels that cause page walks.",
+        "BriefDescription": "Load misses in all DTLB levels that cause page walks.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x85",
+        "EventCode": "0x08",
         "Counter": "0,1,2,3",
         "UMask": "0x2",
-        "EventName": "ITLB_MISSES.WALK_COMPLETED",
+        "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+        "BriefDescription": "Load misses at all DTLB levels that cause completed page walks.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event count cycles when Page Miss Handler (PMH) is servicing page walks caused by ITLB misses.",
-        "EventCode": "0x85",
+        "PublicDescription": "This event counts cycles when the  page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
+        "EventCode": "0x08",
         "Counter": "0,1,2,3",
         "UMask": "0x4",
-        "EventName": "ITLB_MISSES.WALK_DURATION",
+        "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
         "SampleAfterValue": "2000003",
         "BriefDescription": "Cycles when PMH is busy with page walks.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x85",
+        "PublicDescription": "This event counts load operations that miss the first DTLB level but hit the second and do not cause any page walks. The penalty in this case is approximately 7 cycles.",
+        "EventCode": "0x08",
         "Counter": "0,1,2,3",
         "UMask": "0x10",
-        "EventName": "ITLB_MISSES.STLB_HIT",
+        "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+        "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x08",
+        "EventCode": "0x49",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+        "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Load misses in all DTLB levels that cause page walks.",
+        "BriefDescription": "Store misses in all DTLB levels that cause page walks.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x08",
+        "EventCode": "0x49",
         "Counter": "0,1,2,3",
         "UMask": "0x2",
-        "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+        "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Load misses at all DTLB levels that cause completed page walks.",
+        "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts cycles when the  page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
-        "EventCode": "0x08",
+        "EventCode": "0x49",
         "Counter": "0,1,2,3",
         "UMask": "0x4",
-        "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+        "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
         "SampleAfterValue": "2000003",
         "BriefDescription": "Cycles when PMH is busy with page walks.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This event counts load operations that miss the first DTLB level but hit the second and do not cause any page walks. The penalty in this case is approximately 7 cycles.",
-        "EventCode": "0x08",
+        "EventCode": "0x49",
         "Counter": "0,1,2,3",
         "UMask": "0x10",
-        "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+        "EventName": "DTLB_STORE_MISSES.STLB_HIT",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+        "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x49",
+        "EventCode": "0x4F",
+        "Counter": "0,1,2,3",
+        "UMask": "0x10",
+        "EventName": "EPT.WALK_CYCLES",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Cycle count for an Extended Page table walk.  The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
+    {
+        "EventCode": "0x85",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+        "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Store misses in all DTLB levels that cause page walks.",
+        "BriefDescription": "Misses at all ITLB levels that cause page walks.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x49",
+        "EventCode": "0x85",
         "Counter": "0,1,2,3",
         "UMask": "0x2",
-        "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+        "EventName": "ITLB_MISSES.WALK_COMPLETED",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+        "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x49",
+        "PublicDescription": "This event count cycles when Page Miss Handler (PMH) is servicing page walks caused by ITLB misses.",
+        "EventCode": "0x85",
         "Counter": "0,1,2,3",
         "UMask": "0x4",
-        "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+        "EventName": "ITLB_MISSES.WALK_DURATION",
         "SampleAfterValue": "2000003",
         "BriefDescription": "Cycles when PMH is busy with page walks.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x49",
+        "EventCode": "0x85",
         "Counter": "0,1,2,3",
         "UMask": "0x10",
-        "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+        "EventName": "ITLB_MISSES.STLB_HIT",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+        "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
+    {
+        "EventCode": "0xAE",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "ITLB.ITLB_FLUSH",
+        "SampleAfterValue": "100007",
+        "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
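
Every object in these pmu-events files follows the same flat schema (EventCode, Counter, UMask, EventName, SampleAfterValue, BriefDescription, CounterHTOff), which is why the hunk above can reorder entries freely without changing behavior. As a minimal sketch, assuming the standard x86 PERFEVTSEL layout (event select in bits 7:0, unit mask in bits 15:8) for single-EventCode entries like these, one entry maps onto a raw perf event string as follows:

    import json

    # One entry copied verbatim from the hunk above.
    entry = json.loads("""
    {
        "EventCode": "0x08",
        "Counter": "0,1,2,3",
        "UMask": "0x1",
        "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
        "SampleAfterValue": "100003",
        "BriefDescription": "Load misses in all DTLB levels that cause page walks.",
        "CounterHTOff": "0,1,2,3,4,5,6,7"
    }
    """)

    # Unit mask in bits 15:8, event select in bits 7:0, so this entry is
    # equivalent to the raw event r0108 (as in: perf stat -e r0108).
    event = int(entry["EventCode"], 16)
    umask = int(entry["UMask"], 16)
    print(f"{entry['EventName']}: raw r{umask:02x}{event:02x}")

Entries carrying an EventCode list such as "0xB7, 0xBB" (the offcore ones further below) additionally program the MSR named by MSRIndex with MSRValue, so this simple mapping only covers the plain single-code case.
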
index 82be7d1b8b814b0580d7ead69b39da8d168da3f6..805ef1436539976877f643c098a5861668ae7d60 100644 (file)
@@ -36,7 +36,7 @@
         "BriefDescription": "L2 cache request misses"
     },
     {
-        "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss.  Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.\r\nCounts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes.  This will include cycles due to an ITLB miss, ICache miss and other events. \r\n",
+        "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss.  Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.\r\nCounts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes.  This will include cycles due to an ITLB miss, ICache miss and other events.",
         "EventCode": "0x86",
         "Counter": "0,1",
         "UMask": "0x4",
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/other.json b/tools/perf/pmu-events/arch/x86/silvermont/other.json
new file mode 100644 (file)
index 0000000..4781404
--- /dev/null
@@ -0,0 +1,20 @@
+[
+    {
+        "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss.  Note: this event is not the same as page walk cycles to retrieve an instruction translation.",
+        "EventCode": "0x86",
+        "Counter": "0,1",
+        "UMask": "0x2",
+        "EventName": "FETCH_STALL.ITLB_FILL_PENDING_CYCLES",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Cycles code-fetch stalled due to an outstanding ITLB miss."
+    },
+    {
+        "PublicDescription": "Counts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes.  This will include cycles due to an ITLB miss, ICache miss and other events.",
+        "EventCode": "0x86",
+        "Counter": "0,1",
+        "UMask": "0x3f",
+        "EventName": "FETCH_STALL.ALL",
+        "SampleAfterValue": "200003",
+        "BriefDescription": "Cycles code-fetch stalled due to any reason."
+    }
+]
\ No newline at end of file
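
The two events in the new silvermont other.json differ only in unit mask: FETCH_STALL.ALL (umask 0x3f) counts every fetch-stall reason, and FETCH_STALL.ITLB_FILL_PENDING_CYCLES (umask 0x2) is one bit within it. A small worked example of how the pair would typically be combined, using hypothetical counts rather than measured data:

    # Hypothetical counts for the two FETCH_STALL events above, plus
    # unhalted core cycles as the denominator.
    fetch_stall_all = 120_000    # FETCH_STALL.ALL, umask 0x3f
    fetch_stall_itlb = 45_000    # FETCH_STALL.ITLB_FILL_PENDING_CYCLES, umask 0x2
    core_cycles = 1_000_000      # CPU_CLK_UNHALTED.CORE

    assert fetch_stall_itlb <= fetch_stall_all   # 0x2 is contained in 0x3f
    print(f"code fetch stalled: {fetch_stall_all / core_cycles:.1%} of cycles")
    print(f"  of which ITLB-fill pending: {fetch_stall_itlb / fetch_stall_all:.1%}")
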
index 7468af99190ad055a1c4eaa00ef5b35c90543507..1ed62ad4cf778201a1409d64944420c288a7f57a 100644 (file)
         "UMask": "0x4",
         "EventName": "NO_ALLOC_CYCLES.MISPREDICTS",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Counts the number of cycles when no uops are allocated and the alloc pipe is stalled waiting for a mispredicted jump to retire.  After the misprediction is detected, the front end will start immediately but the allocate pipe stalls until the mispredicted "
+        "BriefDescription": "Counts the number of cycles when no uops are allocated and the alloc pipe is stalled waiting for a mispredicted jump to retire.  After the misprediction is detected, the front end will start immediately but the allocate pipe stalls until the mispredicted"
     },
     {
         "EventCode": "0xCA",
     },
     {
         "PublicDescription": "This event counts the number of instructions that retire.  For instructions that consist of multiple micro-ops, this event counts exactly once, as the last micro-op of the instruction retires.  The event continues counting while instructions retire, including during interrupt service routines caused by hardware interrupts, faults or traps.  Background: Modern microprocessors employ extensive pipelining and speculative techniques.  Since sometimes an instruction is started but never completed, the notion of \"retirement\" is introduced.  A retired instruction is one that commits its states. Or stated differently, an instruction might be abandoned at some point. No instruction is truly finished until it retires.  This counter measures the number of completed instructions.  The fixed event is INST_RETIRED.ANY and the programmable event is INST_RETIRED.ANY_P.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 1",
         "UMask": "0x1",
         "EventName": "INST_RETIRED.ANY",
     },
     {
         "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios.  The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. In systems with a constant core frequency, this event can give you a measurement of the elapsed time while the core was not in halt state by dividing the event count by the core frequency. This event is architecturally defined and is a designated fixed counter.  CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.CORE_P use the core frequency which may change from time to time.  CPU_CLK_UNHALTE.REF_TSC and CPU_CLK_UNHALTED.REF are not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time.  The fixed events are CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.REF_TSC and the programmable events are CPU_CLK_UNHALTED.CORE_P and CPU_CLK_UNHALTED.REF.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 2",
         "UMask": "0x2",
         "EventName": "CPU_CLK_UNHALTED.CORE",
     },
     {
         "PublicDescription": "Counts the number of reference cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios.  The core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time.  Divide this event count by core frequency to determine the elapsed time while the core was not in halt state.  Divide this event count by core frequency to determine the elapsed time while the core was not in halt state.  This event is architecturally defined and is a designated fixed counter.  CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.CORE_P use the core frequency which may change from time to time.  CPU_CLK_UNHALTE.REF_TSC and CPU_CLK_UNHALTED.REF are not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time.  The fixed events are CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.REF_TSC and the programmable events are CPU_CLK_UNHALTED.CORE_P and CPU_CLK_UNHALTED.REF.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 3",
         "UMask": "0x3",
         "EventName": "CPU_CLK_UNHALTED.REF_TSC",
index 54bfe9e4045c76d8d5697dd6c98206e0b5f02df7..720458139049c1f4628e32cde02f2aa925e15449 100644 (file)
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "Counts the number of demand Data Read requests that hit L2 cache. Only non rejected loads are counted.",
+        "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
         "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x41",
+        "UMask": "0xc1",
         "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
         "SampleAfterValue": "200003",
         "BriefDescription": "Demand Data Read requests that hit L2 cache",
@@ -73,7 +73,7 @@
         "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
         "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x42",
+        "UMask": "0xc2",
         "EventName": "L2_RQSTS.RFO_HIT",
         "SampleAfterValue": "200003",
         "BriefDescription": "RFO requests that hit L2 cache",
@@ -83,7 +83,7 @@
         "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
         "EventCode": "0x24",
         "Counter": "0,1,2,3",
-        "UMask": "0x44",
+        "UMask": "0xc4",
         "EventName": "L2_RQSTS.CODE_RD_HIT",
         "SampleAfterValue": "200003",
         "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
     },
     {
         "PEBS": "1",
-        "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.\r\n",
+        "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
         "EventCode": "0xD1",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
     },
     {
         "PEBS": "1",
-        "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. \r\n",
+        "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
         "EventCode": "0xD1",
         "Counter": "0,1,2,3",
         "UMask": "0x40",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache.",
+        "PublicDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
         "EventCode": "0xF2",
         "Counter": "0,1,2,3",
         "UMask": "0x4",
         "EventName": "L2_LINES_OUT.USELESS_PREF",
         "SampleAfterValue": "200003",
-        "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+        "BriefDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3fc0400001 ",
+        "MSRValue": "0x3FC0408000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & ANY_SNOOP",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1000400001 ",
+        "MSRValue": "0x1000408000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_HITM",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0400400001 ",
+        "MSRValue": "0x0400408000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0200400001 ",
+        "MSRValue": "0x0200408000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_MISS",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0100400001 ",
+        "MSRValue": "0x0100408000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0080400001 ",
+        "MSRValue": "0x0080408000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NONE",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3fc01c0001 ",
+        "MSRValue": "0x0040408000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_HIT & ANY_SNOOP",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x10001c0001 ",
+        "MSRValue": "0x3FC01C8000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_HITM",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x04001c0001 ",
+        "MSRValue": "0x10001C8000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops sent to sibling cores return clean response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x02001c0001 ",
+        "MSRValue": "0x04001C8000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops sent to sibling cores return clean response.",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x01001c0001 ",
+        "MSRValue": "0x02001C8000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00801c0001 ",
+        "MSRValue": "0x01001C8000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_NONE",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3fc0020001 ",
+        "MSRValue": "0x00801C8000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1000020001 ",
+        "MSRValue": "0x00401C8000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0400020001 ",
+        "MSRValue": "0x3FC0108000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0200020001 ",
+        "MSRValue": "0x1000108000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0100020001 ",
+        "MSRValue": "0x0400108000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts any other requests",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0080020001 ",
+        "MSRValue": "0x0200108000",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0100108000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0080108000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0040108000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC0088000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1000088000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0400088000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0200088000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0100088000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0080088000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0040088000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC0048000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1000048000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0400048000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0200048000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0100048000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0080048000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0040048000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC0028000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1000028000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0400028000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0200028000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0100028000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0080028000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0040028000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests have any response type.",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0000018000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests have any response type.",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC0400004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1000400004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0400400004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0200400004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0100400004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0080400004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0040400004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC01C0004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x10001C0004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x04001C0004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x02001C0004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x01001C0004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x00801C0004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x00401C0004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC0100004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1000100004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0400100004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0200100004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0100100004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0080100004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0040100004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC0080004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1000080004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0400080004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0200080004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0100080004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0080080004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0040080004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC0040004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1000040004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0400040004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0200040004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0100040004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0080040004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0040040004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC0020004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1000020004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0400020004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0200020004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0100020004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0080020004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0040020004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any response type.",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0000010004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any response type.",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC0400002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1000400002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0400400002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0200400002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0100400002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0080400002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0040400002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC01C0002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x10001C0002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x04001C0002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x02001C0002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x01001C0002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x00801C0002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x00401C0002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC0100002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1000100002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0400100002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0200100002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0100100002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0080100002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0040100002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC0080002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1000080002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0400080002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0200080002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0100080002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0080080002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0040080002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC0040002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1000040002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0400040002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0200040002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0100040002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0080040002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0040040002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC0020002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1000020002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0400020002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0200020002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0100020002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0080020002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0040020002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs) have any response type.",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0000010002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC0400001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1000400001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0400400001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0200400001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0100400001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0080400001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0040400001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC01C0001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x10001C0001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x04001C0001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x02001C0001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x01001C0001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x00801C0001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x00401C0001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC0100001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1000100001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0400100001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0200100001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0100100001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0080100001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0040100001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC0080001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1000080001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0400080001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0200080001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0100080001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0080080001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0040080001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC0040001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1000040001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0400040001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0200040001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0100040001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0080040001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0040040001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC0020001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1000020001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0400020001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0200020001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0100020001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0080020001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0040020001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "Counts demand data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads have any response type.",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0000010001 ",
+        "MSRValue": "0x0000010001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts demand data reads that have any response type.",
+        "BriefDescription": "Counts demand data reads have any response type.",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     }
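
Editor's note: the MSRValue in each OFFCORE_RESPONSE entry above is a bitwise OR of three fields programmed into MSR 0x1A6/0x1A7: a request-type field in the low 16 bits, a supplier field in the middle bits, and a snoop-response field in the high bits. A minimal Python sketch follows, with bit positions inferred purely from the MSRValue/EventName pairs in this patch (not from an authoritative manual), so treat them as illustrative:

    # Field layout inferred from the MSRValue/EventName pairs in this patch;
    # the bit positions are illustrative, not authoritative.
    REQUEST = {                       # low 16 bits: request type
        "DEMAND_DATA_RD": 1 << 0,
        "DEMAND_RFO":     1 << 1,
        "DEMAND_CODE_RD": 1 << 2,
        "OTHER":          1 << 15,
    }
    SUPPLIER = {                      # middle bits: who supplied the line
        "ANY_RESPONSE":       1 << 16,
        "SUPPLIER_NONE":      1 << 17,
        "L3_HIT_M":           1 << 18,
        "L3_HIT_E":           1 << 19,
        "L3_HIT_S":           1 << 20,
        "L4_HIT_LOCAL_L4":    1 << 22,
        "L3_MISS_LOCAL_DRAM": 1 << 26,
    }
    SUPPLIER["L3_HIT"] = (SUPPLIER["L3_HIT_M"] | SUPPLIER["L3_HIT_E"]
                          | SUPPLIER["L3_HIT_S"])
    SNOOP = {                         # high bits: snoop response
        "SPL_HIT":          1 << 30,
        "SNOOP_NONE":       1 << 31,
        "SNOOP_NOT_NEEDED": 1 << 32,
        "SNOOP_MISS":       1 << 33,
        "SNOOP_HIT_NO_FWD": 1 << 34,
        "SNOOP_HITM":       1 << 36,
        "SNOOP_NON_DRAM":   1 << 37,
        "ANY_SNOOP":        0x3FC0000000,   # bits 30-37 inclusive
    }

    def msr_value(request, supplier, snoop):
        return REQUEST[request] | SUPPLIER[supplier] | SNOOP[snoop]

    # Cross-checked against entries in this patch:
    assert msr_value("DEMAND_DATA_RD", "L3_HIT", "SNOOP_HITM") == 0x10001C0001
    assert msr_value("DEMAND_RFO", "L3_MISS_LOCAL_DRAM", "ANY_SNOOP") == 0x3FC4000002
    assert msr_value("OTHER", "SUPPLIER_NONE", "SNOOP_NON_DRAM") == 0x2000028000

Since perf builds event aliases from these JSON files, after this update each encoding is selectable by its lowercase EventName (visible in perf list) instead of by raw MSR programming.
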
index 578dff5bd823cb4a66ebc5aa52c14bbad1af5af2..7fa95a35e3cacc9896701e4578014b956f6f6bd7 100644 (file)
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4  x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions).  c. Instruction Decode Queue (IDQ) delivers four uops.",
+        "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding \u201c4 \u2013 x\u201d when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions).  c. Instruction Decode Queue (IDQ) delivers four uops.",
         "EventCode": "0x9C",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
+        "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0\u20132 cycles.",
         "EventCode": "0xAB",
         "Counter": "0,1,2,3",
         "UMask": "0x2",
     },
     {
         "PEBS": "1",
-        "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. \r\n",
+        "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
         "EventCode": "0xC6",
         "MSRValue": "0x11",
         "Counter": "0,1,2,3",
     },
     {
         "PEBS": "1",
-        "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. \r\n",
+        "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
         "EventCode": "0xC6",
         "MSRValue": "0x400806",
         "Counter": "0,1,2,3",
     },
     {
         "PEBS": "1",
-        "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.\r\n",
+        "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
         "EventCode": "0xC6",
         "MSRValue": "0x401006",
         "Counter": "0,1,2,3",
     },
     {
         "PEBS": "1",
-        "PublicDescription": "Counts retired instructions that are delivered to the back-end  after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.\r\n",
+        "PublicDescription": "Counts retired instructions that are delivered to the back-end  after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
         "EventCode": "0xC6",
         "MSRValue": "0x402006",
         "Counter": "0,1,2,3",
     },
     {
         "PEBS": "1",
-        "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.\r\n",
+        "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
         "EventCode": "0xC6",
         "MSRValue": "0x100206",
         "Counter": "0,1,2,3",
index 3bd8b712c889d53c3e8557d67751a396849e1021..f197b4c7695beb45c8e22905a33eb35a49200dff 100644 (file)
         "UMask": "0x4",
         "EventName": "HLE_RETIRED.ABORTED",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one). ",
+        "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
+        "PublicDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
         "EventCode": "0xC8",
         "Counter": "0,1,2,3",
         "UMask": "0x20",
         "UMask": "0x4",
         "EventName": "RTM_RETIRED.ABORTED",
         "SampleAfterValue": "2000003",
-        "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one). ",
+        "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
     },
     {
         "PEBS": "2",
-        "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles.  Reported latency may be longer than just the memory latency.",
+        "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.  Reported latency may be longer than just the memory latency.",
         "EventCode": "0xCD",
         "MSRValue": "0x4",
         "Counter": "0,1,2,3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "100003",
-        "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles.",
+        "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
         "TakenAlone": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "PEBS": "2",
-        "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 8 cycles.  Reported latency may be longer than just the memory latency.",
+        "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.  Reported latency may be longer than just the memory latency.",
         "EventCode": "0xCD",
         "MSRValue": "0x8",
         "Counter": "0,1,2,3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "50021",
-        "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 8 cycles.",
+        "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
         "TakenAlone": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "PEBS": "2",
-        "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 16 cycles.  Reported latency may be longer than just the memory latency.",
+        "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.  Reported latency may be longer than just the memory latency.",
         "EventCode": "0xCD",
         "MSRValue": "0x10",
         "Counter": "0,1,2,3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "20011",
-        "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 16 cycles.",
+        "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
         "TakenAlone": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "PEBS": "2",
-        "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 32 cycles.  Reported latency may be longer than just the memory latency.",
+        "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.  Reported latency may be longer than just the memory latency.",
         "EventCode": "0xCD",
         "MSRValue": "0x20",
         "Counter": "0,1,2,3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "100007",
-        "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 32 cycles.",
+        "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
         "TakenAlone": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "PEBS": "2",
-        "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 64 cycles.  Reported latency may be longer than just the memory latency.",
+        "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.  Reported latency may be longer than just the memory latency.",
         "EventCode": "0xCD",
         "MSRValue": "0x40",
         "Counter": "0,1,2,3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "2003",
-        "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 64 cycles.",
+        "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
         "TakenAlone": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "PEBS": "2",
-        "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 128 cycles.  Reported latency may be longer than just the memory latency.",
+        "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.  Reported latency may be longer than just the memory latency.",
         "EventCode": "0xCD",
         "MSRValue": "0x80",
         "Counter": "0,1,2,3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "1009",
-        "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 128 cycles.",
+        "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
         "TakenAlone": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "PEBS": "2",
-        "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 256 cycles.  Reported latency may be longer than just the memory latency.",
+        "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.  Reported latency may be longer than just the memory latency.",
         "EventCode": "0xCD",
         "MSRValue": "0x100",
         "Counter": "0,1,2,3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "503",
-        "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 256 cycles.",
+        "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
         "TakenAlone": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "PEBS": "2",
-        "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 512 cycles.  Reported latency may be longer than just the memory latency.",
+        "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.  Reported latency may be longer than just the memory latency.",
         "EventCode": "0xCD",
         "MSRValue": "0x200",
         "Counter": "0,1,2,3",
         "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
         "MSRIndex": "0x3F6",
         "SampleAfterValue": "101",
-        "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 512 cycles.",
+        "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
         "TakenAlone": "1",
         "CounterHTOff": "0,1,2,3"
     },
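
Editor's note: across the MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* entries above, the MSRValue written to MSR 0x3F6 is simply the latency threshold in cycles, and each entry carries its own default sample period; the descriptions now make explicit that these PEBS events sample randomly selected loads. A small sketch tying the thresholds to the table:

    # Threshold -> (MSRValue, SampleAfterValue) exactly as listed in the
    # entries above; illustrative restatement of the table, nothing more.
    LOAD_LATENCY = {
        4:   (0x4,   100003),
        8:   (0x8,   50021),
        16:  (0x10,  20011),
        32:  (0x20,  100007),
        64:  (0x40,  2003),
        128: (0x80,  1009),
        256: (0x100, 503),
        512: (0x200, 101),
    }

    # The MSRValue programmed into MSR 0x3F6 is the threshold itself:
    for threshold, (msr_value, _period) in LOAD_LATENCY.items():
        assert msr_value == threshold

Because fewer loads exceed a higher threshold, most entries (GT_32 being the exception) use a smaller default period as the threshold grows, and TakenAlone means each of these events must be scheduled on a counter by itself.
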
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts any other requests",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3ffc000001 ",
+        "MSRValue": "0x3FFC408000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x203C408000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x103C408000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x043C408000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x023C408000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x013C408000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x00BC408000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x007C408000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC4008000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2004008000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1004008000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0404008000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0204008000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0104008000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0084008000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0044008000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2000408000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x20001C8000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2000108000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2000088000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2000048000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts any other requests",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2000028000",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts any other requests",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FFC400004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x203C400004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x103C400004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x043C400004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x023C400004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x013C400004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x00BC400004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x007C400004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC4000004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2004000004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1004000004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0404000004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0204000004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0104000004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0084000004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0044000004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2000400004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x20001C0004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2000100004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2000080004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2000040004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2000020004",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FFC400002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x203C400002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x103C400002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x043C400002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x023C400002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x013C400002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x00BC400002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x007C400002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FC4000002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2004000002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1004000002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0404000002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0204000002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0104000002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0084000002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0044000002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2000400002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x20001C0002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2000100002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2000080002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2000040002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts all demand data writes (RFOs)",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2000020002",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x3FFC400001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS & ANY_SNOOP",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x103c000001 ",
+        "MSRValue": "0x203C400001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x103C400001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_HITM",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x043c000001 ",
+        "MSRValue": "0x043C400001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x023c000001 ",
+        "MSRValue": "0x023C400001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_MISS",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x013c000001 ",
+        "MSRValue": "0x013C400001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x00bc000001 ",
+        "MSRValue": "0x00BC400001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x007C400001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_NONE",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x3fc4000001 ",
+        "MSRValue": "0x3FC4000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x1004000001 ",
+        "MSRValue": "0x2004000001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x1004000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0404000001 ",
+        "MSRValue": "0x0404000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0204000001 ",
+        "MSRValue": "0x0204000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0104000001 ",
+        "MSRValue": "0x0104000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     },
     {
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "PublicDescription": "Counts demand data reads",
         "EventCode": "0xB7, 0xBB",
-        "MSRValue": "0x0084000001 ",
+        "MSRValue": "0x0084000001",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
-        "MSRIndex": "0x1a6,0x1a7",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x0044000001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2000400001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x20001C0001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2000100001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2000080001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2000040001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "SampleAfterValue": "100003",
+        "BriefDescription": "Counts demand data reads",
+        "Offcore": "1",
+        "CounterHTOff": "0,1,2,3"
+    },
+    {
+        "PublicDescription": "Counts demand data reads",
+        "EventCode": "0xB7, 0xBB",
+        "MSRValue": "0x2000020001",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+        "MSRIndex": "0x1a6, 0x1a7",
         "SampleAfterValue": "100003",
-        "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+        "BriefDescription": "Counts demand data reads",
         "Offcore": "1",
         "CounterHTOff": "0,1,2,3"
     }
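The MSRValue hex constants in the entries above pack the offcore request, supplier and snoop selections into a single register value. As a rough orientation only, here is a small Python sketch that decodes which named bits a value sets; the bit positions follow the usual OFFCORE_RESPONSE layout and are an assumption on my part, not something this commit states, so consult the Intel SDM for the authoritative Skylake encoding.

# Illustrative sketch (not from this commit): roughly decode an offcore
# response MSRValue like the ones added above. Request type sits in the
# low bits, snoop response in the upper bits; supplier bits are not
# mapped here. Bit positions are assumptions -- verify against the SDM.
REQUEST_BITS = {0: "DEMAND_DATA_RD", 1: "DEMAND_RFO"}
SNOOP_BITS = {31: "SNOOP_NONE", 32: "SNOOP_NOT_NEEDED",
              33: "SNOOP_MISS", 34: "SNOOP_HIT_NO_FWD",
              36: "SNOOP_HITM", 37: "SNOOP_NON_DRAM"}

def decode(msr_value: int) -> list:
    """Return the symbolic names of the mapped bits set in an MSRValue."""
    fields = []
    for bit, name in {**REQUEST_BITS, **SNOOP_BITS}.items():
        if msr_value & (1 << bit):
            fields.append(name)
    return fields

# 0x203C400002 from OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NON_DRAM above:
print(decode(0x203C400002))  # ['DEMAND_RFO', 'SNOOP_NON_DRAM']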
index bc6d2afbcd8acb4e2af0aac3175780b2fb2a66d9..4a891fbbc4bb2ba20f709fe461d2eb25cea7ba8a 100644 (file)
@@ -1,7 +1,6 @@
 [
     {
         "PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 0",
         "UMask": "0x1",
         "EventName": "INST_RETIRED.ANY",
@@ -11,7 +10,6 @@
     },
     {
         "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 1",
         "UMask": "0x2",
         "EventName": "CPU_CLK_UNHALTED.THREAD",
@@ -20,7 +18,6 @@
         "CounterHTOff": "Fixed counter 1"
     },
     {
-        "EventCode": "0x00",
         "Counter": "Fixed counter 1",
         "UMask": "0x2",
         "AnyThread": "1",
@@ -31,7 +28,6 @@
     },
     {
         "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'.  The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'.  After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
-        "EventCode": "0x00",
         "Counter": "Fixed counter 2",
         "UMask": "0x3",
         "EventName": "CPU_CLK_UNHALTED.REF_TSC",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
+        "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to \u201cMixing Intel AVX and Intel SSE Code\u201d section of the Optimization Guide.",
         "EventCode": "0x0E",
         "Counter": "0,1,2,3",
         "UMask": "0x2",
         "BriefDescription": "Demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
+    {
+        "PublicDescription": "This event counts cycles during which the microcode scoreboard stalls happen.",
+        "EventCode": "0x59",
+        "Counter": "0,1,2,3",
+        "UMask": "0x1",
+        "EventName": "PARTIAL_RAT_STALLS.SCOREBOARD",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Cycles where the pipeline is stalled due to serializing operations.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
     {
         "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
         "EventCode": "0x5E",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "Counts resource-related stall cycles. Reasons for stalls can be as follows:a. *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots).b. *any* u-arch structure got empty (like INT/SIMD FreeLists).c. FPU control word (FPCW), MXCSR.and others. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
-        "EventCode": "0xA2",
+        "PublicDescription": "Counts resource-related stall cycles.",
+        "EventCode": "0xa2",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "EventName": "RESOURCE_STALLS.ANY",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+        "PublicDescription": "This event counts cycles without actually retired uops.",
         "EventCode": "0xC2",
         "Invert": "1",
         "Counter": "0,1,2,3",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
+        "PublicDescription": "Number of machine clears (nukes) of any type.",
         "EventCode": "0xC3",
         "Counter": "0,1,2,3",
         "UMask": "0x1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+        "PEBS": "1",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts not taken branch instructions retired.",
         "EventCode": "0xC4",
         "Counter": "0,1,2,3",
         "UMask": "0x10",
         "Errata": "SKL091",
         "EventName": "BR_INST_RETIRED.NOT_TAKEN",
         "SampleAfterValue": "400009",
-        "BriefDescription": "Not taken branch instructions retired.",
+        "BriefDescription": "Counts all not taken macro branch instructions retired.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "UMask": "0x20",
         "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
         "SampleAfterValue": "400009",
-        "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken. ",
+        "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "BriefDescription": "Increments whenever there is an update to the LBR array.",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
+    {
+        "EventCode": "0xCC",
+        "Counter": "0,1,2,3",
+        "UMask": "0x40",
+        "EventName": "ROB_MISC_EVENTS.PAUSE_INST",
+        "SampleAfterValue": "2000003",
+        "BriefDescription": "Number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted). This event is not supported on first SKL and KBL products.",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
     {
         "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
         "EventCode": "0xE6",
index 71e9737f4614dba62fd60d740c42201499a1f480..2c95417a4dae1f63a568dbfbb23acde8cd24c3fd 100644 (file)
 [
     {
-        "BriefDescription": "Instructions Per Cycle (per logical thread)",
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Frontend_Bound"
+    },
+    {
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Frontend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Bad_Speculation"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Bad_Speculation_SMT"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Backend_Bound"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Backend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Retiring"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Retiring_SMT"
+    },
+    {
         "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Instructions Per Cycle (per logical thread)",
         "MetricGroup": "TopDownL1",
         "MetricName": "IPC"
     },
     {
-        "BriefDescription": "Uops Per Instruction",
         "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
-        "MetricGroup": "Pipeline",
+        "BriefDescription": "Uops Per Instruction",
+        "MetricGroup": "Pipeline;Retiring",
         "MetricName": "UPI"
     },
     {
-        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
-        "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )",
-        "MetricGroup": "Frontend",
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+        "BriefDescription": "Instruction per taken branch",
+        "MetricGroup": "Branches;PGO",
+        "MetricName": "IpTB"
+    },
+    {
+        "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+        "BriefDescription": "Branch instructions per taken branch. ",
+        "MetricGroup": "Branches;PGO",
+        "MetricName": "BpTB"
+    },
+    {
+        "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
+        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+        "MetricGroup": "PGO",
         "MetricName": "IFetch_Line_Utilization"
     },
     {
-        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
-        "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
-        "MetricGroup": "DSB; Frontend_Bandwidth",
+        "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ))",
+        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+        "MetricGroup": "DSB;Frontend_Bandwidth",
         "MetricName": "DSB_Coverage"
     },
     {
-        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricGroup": "Pipeline;Summary",
         "MetricName": "CPI"
     },
     {
-        "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
         "MetricGroup": "Summary",
         "MetricName": "CLKS"
     },
     {
-        "BriefDescription": "Total issue-pipeline slots",
-        "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+        "MetricExpr": "4 * cycles",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
         "MetricGroup": "TopDownL1",
         "MetricName": "SLOTS"
     },
     {
-        "BriefDescription": "Total number of retired Instructions",
+        "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
+        "MetricGroup": "TopDownL1_SMT",
+        "MetricName": "SLOTS_SMT"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+        "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+        "MetricGroup": "Instruction_Type;L1_Bound",
+        "MetricName": "IpL"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+        "BriefDescription": "Instructions per Store",
+        "MetricGroup": "Instruction_Type;Store_Bound",
+        "MetricName": "IpS"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Instructions per Branch",
+        "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+        "MetricName": "IpB"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+        "BriefDescription": "Instruction per (near) call",
+        "MetricGroup": "Branches",
+        "MetricName": "IpCall"
+    },
+    {
         "MetricExpr": "INST_RETIRED.ANY",
+        "BriefDescription": "Total number of retired Instructions",
         "MetricGroup": "Summary",
         "MetricName": "Instructions"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / cycles",
         "BriefDescription": "Instructions Per Cycle (per physical core)",
-        "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
         "MetricGroup": "SMT",
         "MetricName": "CoreIPC"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Instructions Per Cycle (per physical core)",
+        "MetricGroup": "SMT",
+        "MetricName": "CoreIPC_SMT"
+    },
+    {
+        "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
+        "BriefDescription": "Floating Point Operations Per Cycle",
+        "MetricGroup": "FLOPS",
+        "MetricName": "FLOPc"
+    },
+    {
+        "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Floating Point Operations Per Cycle",
+        "MetricGroup": "FLOPS_SMT",
+        "MetricName": "FLOPc_SMT"
+    },
+    {
+        "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
         "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
-        "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
         "MetricGroup": "Pipeline;Ports_Utilization",
         "MetricName": "ILP"
     },
     {
-        "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
-        "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL  - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)",
-        "MetricGroup": "Unknown_Branches",
-        "MetricName": "BAClear_Cost"
+        "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+        "MetricGroup": "Branch_Mispredicts",
+        "MetricName": "Branch_Misprediction_Cost"
     },
     {
+        "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+        "MetricGroup": "Branch_Mispredicts_SMT",
+        "MetricName": "Branch_Misprediction_Cost_SMT"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+        "MetricGroup": "Branch_Mispredicts",
+        "MetricName": "IpMispredict"
+    },
+    {
+        "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
         "BriefDescription": "Core actual clocks when any thread is active on the physical core",
-        "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
         "MetricGroup": "SMT",
         "MetricName": "CORE_CLKS"
     },
     {
-        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
         "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
+        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
         "MetricGroup": "Memory_Bound;Memory_Lat",
         "MetricName": "Load_Miss_Real_Latency"
     },
     {
-        "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
-        "MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+        "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+        "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
         "MetricGroup": "Memory_Bound;Memory_BW",
         "MetricName": "MLP"
     },
     {
+        "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
         "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
-        "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )",
         "MetricGroup": "TLB",
         "MetricName": "Page_Walks_Utilization"
     },
     {
-        "BriefDescription": "Average CPU Utilization",
+        "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
+        "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+        "MetricGroup": "TLB_SMT",
+        "MetricName": "Page_Walks_Utilization_SMT"
+    },
+    {
+        "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+        "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L1D_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+        "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L2_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+        "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L3_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+        "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L3_Cache_Access_BW"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L1MPKI"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2MPKI"
+    },
+    {
+        "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2MPKI_All"
+    },
+    {
+        "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2HPKI_All"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L3MPKI"
+    },
+    {
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+        "BriefDescription": "Average CPU Utilization",
         "MetricGroup": "Summary",
         "MetricName": "CPU_Utilization"
     },
     {
+        "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
         "BriefDescription": "Giga Floating Point Operations Per Second",
-        "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
         "MetricGroup": "FLOPS;Summary",
         "MetricName": "GFLOPs"
     },
     {
-        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricGroup": "Power",
         "MetricName": "Turbo_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricGroup": "SMT;Summary",
         "MetricName": "SMT_2T_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
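The Summary/Power metrics in this stretch are plain counter ratios. A sketch of Turbo_Utilization and Kernel_Utilization as written above (the `:u` modifier restricts a perf event to user-mode counting), again with illustrative values:

```python
# Illustrative counter values.
clk_thread = 3_600_000_000   # CPU_CLK_UNHALTED.THREAD
ref_tsc = 3_000_000_000      # CPU_CLK_UNHALTED.REF_TSC
ref_tsc_u = 2_400_000_000    # CPU_CLK_UNHALTED.REF_TSC:u (user mode only)

turbo_utilization = clk_thread / ref_tsc    # exceeds 1.0 while boosting
kernel_utilization = ref_tsc_u / ref_tsc    # ratio exactly as in the file
print(f"Turbo_Utilization={turbo_utilization:.2f}  "
      f"Kernel_Utilization={kernel_utilization:.2f}")
```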
     {
-        "BriefDescription": "C3 residency percent per core",
+        "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
+        "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "DRAM_BW_Use"
+    },
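The new DRAM_BW_Use metric counts 64-byte cache-line transfers at the memory arbiter and scales them to GB/s over the wall-clock interval; the `/ 1000000 / duration_time / 1000` tail in the expression is just a split-up division by 1e9. A sketch with illustrative counts:

```python
# Illustrative event counts over a 1-second interval.
reads = 40_000_000    # arb event 0x81, umask 0x1
writes = 10_000_000   # arb event 0x84, umask 0x1
duration_time = 1.0   # seconds

bw_gb_s = 64 * (reads + writes) / 1e9 / duration_time
print(f"DRAM_BW_Use = {bw_gb_s:.2f} GB/s")
```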
+    {
+        "MetricExpr": "arb@event\\=0x80\\,umask\\=0x2@ / arb@event\\=0x80\\,umask\\=0x2\\,thresh\\=1@",
+        "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "DRAM_Parallel_Reads"
+    },
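DRAM_Parallel_Reads divides a per-cycle occupancy count (read requests outstanding each cycle) by the same event thresholded at one (cycles with at least one request outstanding), giving the average number of reads in flight while any read is in flight. A sketch with illustrative values:

```python
# Illustrative event counts.
occupancy = 90_000_000         # arb event 0x80, umask 0x2
cycles_with_read = 30_000_000  # same event with thresh=1

print(f"DRAM_Parallel_Reads = {occupancy / cycles_with_read:.2f}")
```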
+    {
         "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per core",
         "MetricName": "C3_Core_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per core",
         "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per core",
         "MetricName": "C6_Core_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per core",
         "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per core",
         "MetricName": "C7_Core_Residency"
     },
     {
-        "BriefDescription": "C2 residency percent per package",
         "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C2 residency percent per package",
         "MetricName": "C2_Pkg_Residency"
     },
     {
-        "BriefDescription": "C3 residency percent per package",
         "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per package",
         "MetricName": "C3_Pkg_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per package",
         "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per package",
         "MetricName": "C6_Pkg_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per package",
         "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per package",
         "MetricName": "C7_Pkg_Residency"
     }
 ]
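Since these metric files are plain JSON arrays, they can be inspected without building perf. A minimal sketch; the path is an assumption about where this Skylake metrics file lives in the tree, so adjust it to your checkout:

```python
import json

# Assumed location of the Skylake client metrics file shown above.
path = "tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json"

with open(path) as f:
    metrics = json.load(f)

for m in metrics:
    print(f'{m["MetricName"]:20} [{m.get("MetricGroup", "")}] {m["MetricExpr"]}')
```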
index 5c9940866acd851f1fcf93714bb5dfe6f5cd887c..24df183693faa5ab10dcbb3f6312316dbc73e0fd 100644 (file)
     },
     {
         "EventCode": "0x24",
-        "UMask": "0x41",
+        "UMask": "0xc1",
         "BriefDescription": "Demand Data Read requests that hit L2 cache",
         "Counter": "0,1,2,3",
         "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
-        "PublicDescription": "Counts the number of demand Data Read requests that hit L2 cache. Only non rejected loads are counted.",
+        "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
         "SampleAfterValue": "200003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
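For the UMask corrections above, a hedged sketch of how an EventCode/UMask pair maps onto a raw perf event on Intel: the low byte of the config is the event code and the next byte is the umask, so the 0x41 to 0xc1 change is purely an encoding fix that the generated event tables pick up. Nothing here programs the PMU; it only formats the event strings:

```python
# L2_RQSTS.DEMAND_DATA_RD_HIT after this patch.
event_code, umask = 0x24, 0xC1
config = (umask << 8) | event_code   # Intel raw config layout

print(f"perf stat -e cpu/event={event_code:#x},umask={umask:#x}/ ...")
print(f"equivalent raw event: -e r{config:x}")
```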
     {
         "EventCode": "0x24",
-        "UMask": "0x42",
+        "UMask": "0xc2",
         "BriefDescription": "RFO requests that hit L2 cache",
         "Counter": "0,1,2,3",
         "EventName": "L2_RQSTS.RFO_HIT",
@@ -81,7 +81,7 @@
     },
     {
         "EventCode": "0x24",
-        "UMask": "0x44",
+        "UMask": "0xc4",
         "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
         "Counter": "0,1,2,3",
         "EventName": "L2_RQSTS.CODE_RD_HIT",
         "BriefDescription": "Core-originated cacheable demand requests missed L3",
         "Counter": "0,1,2,3",
         "EventName": "LONGEST_LAT_CACHE.MISS",
+        "Errata": "SKL057",
         "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all misses to the L3.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
         "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
         "Counter": "0,1,2,3",
         "EventName": "LONGEST_LAT_CACHE.REFERENCE",
-        "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2.  It does not include all accesses to the L3.",
+        "Errata": "SKL057",
+        "PublicDescription": "Counts core-originated cacheable requests to the  L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2.  It does not include all accesses to the L3.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x48",
         "UMask": "0x1",
-        "BriefDescription": "L1D miss outstandings duration in cycles",
+        "BriefDescription": "Cycles with L1D load Misses outstanding.",
         "Counter": "0,1,2,3",
-        "EventName": "L1D_PEND_MISS.PENDING",
-        "PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+        "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+        "CounterMask": "1",
+        "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x48",
         "UMask": "0x1",
-        "BriefDescription": "Cycles with L1D load Misses outstanding.",
+        "BriefDescription": "L1D miss outstandings duration in cycles",
         "Counter": "0,1,2,3",
-        "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
-        "CounterMask": "1",
-        "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+        "EventName": "L1D_PEND_MISS.PENDING",
+        "PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
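The two 0x48 entries above are the usual occupancy/threshold pair: PENDING accumulates the number of outstanding L1D misses every cycle, while PENDING_CYCLES (CounterMask = 1) counts only the cycles where that number is nonzero. Their ratio is a common derived metric; a sketch with illustrative values:

```python
# Illustrative counts for the occupancy/threshold pair.
pending = 5_400_000         # L1D_PEND_MISS.PENDING
pending_cycles = 1_800_000  # L1D_PEND_MISS.PENDING_CYCLES (CounterMask = 1)

# Average misses in flight while at least one miss is in flight.
print(f"avg outstanding L1D misses = {pending / pending_cycles:.2f}")
```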
     {
         "EventCode": "0x60",
         "UMask": "0x1",
-        "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+        "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
-        "PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
+        "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+        "CounterMask": "1",
+        "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x60",
         "UMask": "0x1",
-        "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+        "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
-        "CounterMask": "1",
-        "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+        "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+        "PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x60",
         "UMask": "0x8",
-        "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+        "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
-        "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+        "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+        "CounterMask": "1",
+        "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x60",
         "UMask": "0x8",
-        "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+        "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
-        "CounterMask": "1",
-        "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+        "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+        "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
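The reordered OFFCORE_REQUESTS_OUTSTANDING entries follow the same occupancy/CounterMask pattern, which is also what feeds metrics like DRAM_Parallel_Reads earlier in this patch. A sketch, again with illustrative values:

```python
# Illustrative counts.
outstanding = 12_000_000  # OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD
cycles_with = 3_000_000   # ...CYCLES_WITH_DATA_RD (CounterMask = 1)

# Average offcore data reads in flight in the super queue.
print(f"avg offcore data reads in flight = {outstanding / cycles_with:.2f}")
```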
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_LOAD_RETIRED.L1_HIT",
-        "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.\r\n",
+        "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3"
     },
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "MEM_LOAD_RETIRED.FB_HIT",
-        "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. \r\n",
+        "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
         "SampleAfterValue": "100007",
         "CounterHTOff": "0,1,2,3"
     },
         "BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared state. A non-threaded event.",
         "Counter": "0,1,2,3",
         "EventName": "L2_LINES_OUT.SILENT",
+        "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
         "SampleAfterValue": "200003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
         "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3.  Clean lines may either be allocated in L3 or dropped",
         "Counter": "0,1,2,3",
         "EventName": "L2_LINES_OUT.NON_SILENT",
-        "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3.  Clean lines may either be allocated in L3 or dropped.",
+        "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
         "SampleAfterValue": "200003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xF2",
         "UMask": "0x4",
-        "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+        "BriefDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
+        "Deprecated": "1",
         "Counter": "0,1,2,3",
         "EventName": "L2_LINES_OUT.USELESS_PREF",
-        "PublicDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache.",
+        "PublicDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
         "SampleAfterValue": "200003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts demand data reads that have any response type.",
-        "MSRValue": "0x0000010001 ",
+        "BriefDescription": "Counts demand data reads have any response type.",
+        "MSRValue": "0x0000010001",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts demand data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand data reads have any response type.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
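The OFFCORE_RESPONSE MSRValues in the rest of this file are bit masks programmed into MSR 0x1a6/0x1a7. A hedged decoding sketch, assuming the usual layout from the Intel SDM (request-type bits in the low 16 bits, supplier/response bits above them, snoop bits from bit 31 up); the field boundaries are my assumption, not something this patch states:

```python
# Hedged sketch: split an OFFCORE_RESPONSE MSRValue into rough fields.
# Field boundaries assume the SDM layout (request: bits 0-15,
# supplier/response: bits 16-30, snoop: bits 31+), not this patch.
def decode(msr_value: int) -> dict:
    return {
        "request": msr_value & 0xFFFF,
        "supplier": (msr_value >> 16) & 0x7FFF,
        "snoop": msr_value >> 31,
    }

# OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP from below:
print({k: hex(v) for k, v in decode(0x3F803C0001).items()})
# {'request': '0x1', 'supplier': '0x3c', 'snoop': '0x7f'}
```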
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
-        "MSRValue": "0x01003c0001 ",
+        "BriefDescription": "Counts demand data reads TBD TBD",
+        "MSRValue": "0x01003C0001",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand data reads TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x04003c0001 ",
+        "BriefDescription": "Counts demand data reads TBD TBD",
+        "MSRValue": "0x04003C0001",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
-        "SampleAfterValue": "100003",
-        "CounterHTOff": "0,1,2,3"
-    },
-    {
-        "Offcore": "1",
-        "EventCode": "0xB7, 0xBB",
-        "UMask": "0x1",
-        "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
-        "MSRValue": "0x08003c0001 ",
-        "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand data reads TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x10003c0001 ",
+        "BriefDescription": "Counts demand data reads TBD TBD",
+        "MSRValue": "0x10003C0001",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand data reads TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts demand data reads that hit in the L3.",
-        "MSRValue": "0x3f803c0001 ",
+        "BriefDescription": "Counts demand data reads TBD TBD",
+        "MSRValue": "0x3F803C0001",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts demand data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand data reads TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand data writes (RFOs) that have any response type.",
-        "MSRValue": "0x0000010002 ",
+        "BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
+        "MSRValue": "0x0000010002",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand data writes (RFOs) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all demand data writes (RFOs) have any response type.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
-        "MSRValue": "0x01003c0002 ",
+        "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+        "MSRValue": "0x01003C0002",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x04003c0002 ",
+        "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+        "MSRValue": "0x04003C0002",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_HIT_WITH_FWD",
-        "MSRValue": "0x08003c0002 ",
-        "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
-        "SampleAfterValue": "100003",
-        "CounterHTOff": "0,1,2,3"
-    },
-    {
-        "Offcore": "1",
-        "EventCode": "0xB7, 0xBB",
-        "UMask": "0x1",
-        "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x10003c0002 ",
+        "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+        "MSRValue": "0x10003C0002",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3.",
-        "MSRValue": "0x3f803c0002 ",
+        "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+        "MSRValue": "0x3F803C0002",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand code reads that have any response type.",
-        "MSRValue": "0x0000010004 ",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any response type.",
+        "MSRValue": "0x0000010004",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any response type.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
-        "MSRValue": "0x01003c0004 ",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
+        "MSRValue": "0x01003C0004",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x04003c0004 ",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
+        "MSRValue": "0x04003C0004",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
-        "MSRValue": "0x08003c0004 ",
-        "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
-        "SampleAfterValue": "100003",
-        "CounterHTOff": "0,1,2,3"
-    },
-    {
-        "Offcore": "1",
-        "EventCode": "0xB7, 0xBB",
-        "UMask": "0x1",
-        "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x10003c0004 ",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
+        "MSRValue": "0x10003C0004",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand code reads that hit in the L3.",
-        "MSRValue": "0x3f803c0004 ",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
+        "MSRValue": "0x3F803C0004",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that have any response type.",
-        "MSRValue": "0x0000010010 ",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
+        "MSRValue": "0x0000010010",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
-        "MSRValue": "0x01003c0010 ",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+        "MSRValue": "0x01003C0010",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x04003c0010 ",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+        "MSRValue": "0x04003C0010",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "PF_L2_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
-        "MSRValue": "0x08003c0010 ",
-        "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
-        "SampleAfterValue": "100003",
-        "CounterHTOff": "0,1,2,3"
-    },
-    {
-        "Offcore": "1",
-        "EventCode": "0xB7, 0xBB",
-        "UMask": "0x1",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x10003c0010 ",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+        "MSRValue": "0x10003C0010",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3.",
-        "MSRValue": "0x3f803c0010 ",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+        "MSRValue": "0x3F803C0010",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type.",
-        "MSRValue": "0x0000010020 ",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
+        "MSRValue": "0x0000010020",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
-        "MSRValue": "0x01003c0020 ",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+        "MSRValue": "0x01003C0020",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x04003c0020 ",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+        "MSRValue": "0x04003C0020",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "PF_L2_RFO & L3_HIT & SNOOP_HIT_WITH_FWD",
-        "MSRValue": "0x08003c0020 ",
-        "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
-        "SampleAfterValue": "100003",
-        "CounterHTOff": "0,1,2,3"
-    },
-    {
-        "Offcore": "1",
-        "EventCode": "0xB7, 0xBB",
-        "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x10003c0020 ",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+        "MSRValue": "0x10003C0020",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3.",
-        "MSRValue": "0x3f803c0020 ",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+        "MSRValue": "0x3F803C0020",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type.",
-        "MSRValue": "0x0000010080 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
+        "MSRValue": "0x0000010080",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
-        "MSRValue": "0x01003c0080 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+        "MSRValue": "0x01003C0080",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x04003c0080 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+        "MSRValue": "0x04003C0080",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
-        "SampleAfterValue": "100003",
-        "CounterHTOff": "0,1,2,3"
-    },
-    {
-        "Offcore": "1",
-        "EventCode": "0xB7, 0xBB",
-        "UMask": "0x1",
-        "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
-        "MSRValue": "0x08003c0080 ",
-        "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x10003c0080 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+        "MSRValue": "0x10003C0080",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3.",
-        "MSRValue": "0x3f803c0080 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+        "MSRValue": "0x3F803C0080",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type.",
-        "MSRValue": "0x0000010100 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
+        "MSRValue": "0x0000010100",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
-        "MSRValue": "0x01003c0100 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+        "MSRValue": "0x01003C0100",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x04003c0100 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+        "MSRValue": "0x04003C0100",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_HIT_WITH_FWD",
-        "MSRValue": "0x08003c0100 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+        "MSRValue": "0x10003C0100",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x10003c0100 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+        "MSRValue": "0x3F803C0100",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3.",
-        "MSRValue": "0x3f803c0100 ",
+        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests have any response type.",
+        "MSRValue": "0x0000010400",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.ANY_RESPONSE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests have any response type.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that have any response type.",
-        "MSRValue": "0x0000010400 ",
+        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+        "MSRValue": "0x01003C0400",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
-        "MSRValue": "0x01003c0400 ",
+        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+        "MSRValue": "0x04003C0400",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x04003c0400 ",
+        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+        "MSRValue": "0x10003C0400",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "PF_L1D_AND_SW & L3_HIT & SNOOP_HIT_WITH_FWD",
-        "MSRValue": "0x08003c0400 ",
+        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+        "MSRValue": "0x3F803C0400",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x10003c0400 ",
+        "BriefDescription": "TBD have any response type.",
+        "MSRValue": "0x0000010490",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD have any response type.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3.",
-        "MSRValue": "0x3f803c0400 ",
+        "BriefDescription": "TBD TBD TBD",
+        "MSRValue": "0x01003C0490",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts any other requests that have any response type.",
-        "MSRValue": "0x0000018000 ",
+        "BriefDescription": "TBD TBD TBD",
+        "MSRValue": "0x04003C0490",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts any other requests that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
-        "MSRValue": "0x01003c8000 ",
+        "BriefDescription": "TBD TBD TBD",
+        "MSRValue": "0x10003C0490",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.NO_SNOOP_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x04003c8000 ",
+        "BriefDescription": "TBD TBD TBD",
+        "MSRValue": "0x3F803C0490",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "OTHER & L3_HIT & SNOOP_HIT_WITH_FWD",
-        "MSRValue": "0x08003c8000 ",
+        "BriefDescription": "TBD have any response type.",
+        "MSRValue": "0x0000010120",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD have any response type.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x10003c8000 ",
+        "BriefDescription": "TBD TBD TBD",
+        "MSRValue": "0x01003C0120",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts any other requests that hit in the L3.",
-        "MSRValue": "0x3f803c8000 ",
+        "BriefDescription": "TBD TBD TBD",
+        "MSRValue": "0x04003C0120",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts any other requests that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch data reads that have any response type.",
-        "MSRValue": "0x0000010490 ",
+        "BriefDescription": "TBD TBD TBD",
+        "MSRValue": "0x10003C0120",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
-        "MSRValue": "0x01003c0490 ",
+        "BriefDescription": "TBD TBD TBD",
+        "MSRValue": "0x3F803C0120",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x04003c0490 ",
+        "BriefDescription": "TBD have any response type.",
+        "MSRValue": "0x0000010491",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD have any response type.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
-        "MSRValue": "0x08003c0490 ",
+        "BriefDescription": "TBD TBD TBD",
+        "MSRValue": "0x01003C0491",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x10003c0490 ",
+        "BriefDescription": "TBD TBD TBD",
+        "MSRValue": "0x04003C0491",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch data reads that hit in the L3.",
-        "MSRValue": "0x3f803c0490 ",
+        "BriefDescription": "TBD TBD TBD",
+        "MSRValue": "0x10003C0491",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch RFOs that have any response type.",
-        "MSRValue": "0x0000010120 ",
+        "BriefDescription": "TBD TBD TBD",
+        "MSRValue": "0x3F803C0491",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
-        "MSRValue": "0x01003c0120 ",
+        "BriefDescription": "TBD have any response type.",
+        "MSRValue": "0x0000010122",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD have any response type.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x04003c0120 ",
+        "BriefDescription": "TBD TBD TBD",
+        "MSRValue": "0x01003C0122",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "ALL_PF_RFO & L3_HIT & SNOOP_HIT_WITH_FWD",
-        "MSRValue": "0x08003c0120 ",
+        "BriefDescription": "TBD TBD TBD",
+        "MSRValue": "0x04003C0122",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x10003c0120 ",
+        "BriefDescription": "TBD TBD TBD",
+        "MSRValue": "0x10003C0122",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch RFOs that hit in the L3.",
-        "MSRValue": "0x3f803c0120 ",
+        "BriefDescription": "TBD TBD TBD",
+        "MSRValue": "0x3F803C0122",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that have any response type.",
-        "MSRValue": "0x0000010491 ",
+        "BriefDescription": "Counts demand data reads",
+        "MSRValue": "0x08007C0001",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+        "PublicDescription": "Counts demand data reads",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
-        "MSRValue": "0x01003c0491 ",
+        "BriefDescription": "Counts all demand data writes (RFOs)",
+        "MSRValue": "0x08007C0002",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+        "PublicDescription": "Counts all demand data writes (RFOs)",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x04003c0491 ",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+        "MSRValue": "0x08007C0004",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "ALL_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
-        "MSRValue": "0x08003c0491 ",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+        "MSRValue": "0x08007C0010",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x10003c0491 ",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+        "MSRValue": "0x08007C0020",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3.",
-        "MSRValue": "0x3f803c0491 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+        "MSRValue": "0x08007C0080",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch RFOs that have any response type.",
-        "MSRValue": "0x0000010122 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+        "MSRValue": "0x08007C0100",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
-        "MSRValue": "0x01003c0122 ",
+        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+        "MSRValue": "0x08007C0400",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
+        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x04003c0122 ",
+        "BriefDescription": "TBD",
+        "MSRValue": "0x08007C0490",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+        "PublicDescription": "TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "ALL_RFO & L3_HIT & SNOOP_HIT_WITH_FWD",
-        "MSRValue": "0x08003c0122 ",
+        "BriefDescription": "TBD",
+        "MSRValue": "0x08007C0120",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+        "PublicDescription": "TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
-        "MSRValue": "0x10003c0122 ",
+        "BriefDescription": "TBD",
+        "MSRValue": "0x08007C0491",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+        "PublicDescription": "TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3.",
-        "MSRValue": "0x3f803c0122 ",
+        "BriefDescription": "TBD",
+        "MSRValue": "0x08007C0122",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+        "PublicDescription": "TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     }
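The PublicDescription boilerplate stripped throughout the hunks above documents the constraint these entries encode: OFFCORE_RESPONSE is only valid on event select 0xB7/0xBB with the transaction attributes programmed into the dedicated MSR pair 0x1a6/0x1a7, which is exactly what the MSRIndex/MSRValue fields carry. perf does this wiring automatically when given an EventName from the table; purely as an illustration, here is a minimal sketch of the same pairing through the raw syscall interface, reusing one MSRValue from the table above (this sketch is not part of the patch, and error handling is trimmed):

/*
 * Minimal sketch (not part of this patch): count one of the
 * OFFCORE_RESPONSE events above via the raw-event interface.
 * perf passes the JSON MSRValue through attr.config1, which the
 * kernel programs into MSR 0x1a6/0x1a7 (OFFCORE_RSP_0/1).
 */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	/* EventCode 0xB7, UMask 0x1 -> OFFCORE_RESPONSE_0 */
	attr.config = 0x01b7;
	/* MSRValue of OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP above */
	attr.config1 = 0x3F803C0100ULL;
	attr.disabled = 1;

	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement runs here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	uint64_t count;
	read(fd, &count, sizeof(count));
	printf("offcore responses: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}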
index 286ed1a37ec9a80be77d62418113d7a2e44f1fcf..c5d0babe89fcef11db9ffabc956c741138f60a46 100644 (file)
@@ -59,7 +59,6 @@
         "BriefDescription": "Number of Packed Double-Precision FP arithmetic instructions (Use operation multiplier of 8)",
         "Counter": "0,1,2,3",
         "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
-        "PublicDescription": "Number of Packed Double-Precision FP arithmetic instructions (Use operation multiplier of 8).",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
@@ -69,7 +68,6 @@
         "BriefDescription": "Number of Packed Single-Precision FP arithmetic instructions (Use operation multiplier of 16)",
         "Counter": "0,1,2,3",
         "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
-        "PublicDescription": "Number of Packed Single-Precision FP arithmetic instructions (Use operation multiplier of 16).",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
index 403a4f89e9b2778161aee8c2e26b40ec2f1cc96a..4dc583cfb5459c29129f31443d454d3c2667dd33 100644 (file)
@@ -1,14 +1,4 @@
 [
-    {
-        "EventCode": "0x79",
-        "UMask": "0x4",
-        "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
-        "Counter": "0,1,2,3",
-        "EventName": "IDQ.MITE_UOPS",
-        "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
-        "SampleAfterValue": "2000003",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
     {
         "EventCode": "0x79",
         "UMask": "0x4",
     },
     {
         "EventCode": "0x79",
-        "UMask": "0x8",
-        "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+        "UMask": "0x4",
+        "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
         "Counter": "0,1,2,3",
-        "EventName": "IDQ.DSB_UOPS",
-        "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+        "EventName": "IDQ.MITE_UOPS",
+        "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
+    {
+        "EventCode": "0x79",
+        "UMask": "0x8",
+        "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+        "Counter": "0,1,2,3",
+        "EventName": "IDQ.DSB_UOPS",
+        "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+        "SampleAfterValue": "2000003",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
     {
         "EventCode": "0x79",
         "UMask": "0x10",
     {
         "EventCode": "0x79",
         "UMask": "0x18",
-        "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+        "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
         "Counter": "0,1,2,3",
-        "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
-        "CounterMask": "4",
-        "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+        "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+        "CounterMask": "1",
+        "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x79",
         "UMask": "0x18",
-        "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+        "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
         "Counter": "0,1,2,3",
-        "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
-        "CounterMask": "1",
-        "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+        "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+        "CounterMask": "4",
+        "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x79",
         "UMask": "0x24",
-        "BriefDescription": "Cycles MITE is delivering 4 Uops",
+        "BriefDescription": "Cycles MITE is delivering any Uop",
         "Counter": "0,1,2,3",
-        "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
-        "CounterMask": "4",
-        "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+        "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+        "CounterMask": "1",
+        "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x79",
         "UMask": "0x24",
-        "BriefDescription": "Cycles MITE is delivering any Uop",
+        "BriefDescription": "Cycles MITE is delivering 4 Uops",
         "Counter": "0,1,2,3",
-        "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
-        "CounterMask": "1",
-        "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+        "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+        "CounterMask": "4",
+        "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EdgeDetect": "1",
         "EventCode": "0x79",
         "UMask": "0x30",
-        "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+        "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
         "Counter": "0,1,2,3",
-        "EventName": "IDQ.MS_SWITCHES",
-        "CounterMask": "1",
-        "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+        "EventName": "IDQ.MS_UOPS",
+        "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
+        "EdgeDetect": "1",
         "EventCode": "0x79",
         "UMask": "0x30",
-        "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+        "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
         "Counter": "0,1,2,3",
-        "EventName": "IDQ.MS_UOPS",
-        "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
+        "EventName": "IDQ.MS_SWITCHES",
+        "CounterMask": "1",
+        "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
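The EdgeDetect/CounterMask fields shuffled between the two entries above map directly onto the architectural PERFEVTSEL layout; IDQ.MS_SWITCHES uses edge=1 with cmask=1 so the counter increments on 0->1 transitions of the "MS busy" condition, i.e. on switches to the Microcode Sequencer. A sketch of that encoding, assuming the standard x86 bit positions (perf derives all of this from the JSON on its own):

#include <stdint.h>

/* Architectural IA32_PERFEVTSELx layout: event [7:0], umask [15:8],
 * edge bit 18, inv bit 23, cmask [31:24].  For IDQ.MS_SWITCHES:
 * raw_config(0x79, 0x30, 1, 0, 1). */
static uint64_t raw_config(uint8_t event, uint8_t umask,
			   unsigned int edge, unsigned int inv,
			   uint8_t cmask)
{
	return (uint64_t)event | ((uint64_t)umask << 8) |
	       ((uint64_t)edge << 18) | ((uint64_t)inv << 23) |
	       ((uint64_t)cmask << 24);
}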
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
+        "Invert": "1",
         "EventCode": "0x9C",
         "UMask": "0x1",
-        "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+        "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
         "Counter": "0,1,2,3",
-        "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
-        "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4  x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions).  c. Instruction Decode Queue (IDQ) delivers four uops.",
+        "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+        "CounterMask": "1",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x9C",
         "UMask": "0x1",
-        "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+        "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
         "Counter": "0,1,2,3",
-        "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
-        "CounterMask": "4",
-        "PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
+        "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+        "CounterMask": "1",
+        "PublicDescription": "Cycles with less than 3 uops delivered by the front-end.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x9C",
         "UMask": "0x1",
-        "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+        "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
         "Counter": "0,1,2,3",
-        "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
-        "CounterMask": "3",
-        "PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.",
+        "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+        "CounterMask": "2",
+        "PublicDescription": "Cycles with less than 2 uops delivered by the front-end.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x9C",
         "UMask": "0x1",
-        "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+        "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
         "Counter": "0,1,2,3",
-        "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
-        "CounterMask": "2",
-        "PublicDescription": "Cycles with less than 2 uops delivered by the front-end.",
+        "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+        "CounterMask": "3",
+        "PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x9C",
         "UMask": "0x1",
-        "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+        "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
         "Counter": "0,1,2,3",
-        "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
-        "CounterMask": "1",
-        "PublicDescription": "Cycles with less than 3 uops delivered by the front-end.",
+        "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+        "CounterMask": "4",
+        "PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "Invert": "1",
         "EventCode": "0x9C",
         "UMask": "0x1",
-        "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+        "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
         "Counter": "0,1,2,3",
-        "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
-        "CounterMask": "1",
+        "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+        "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding \u201c4 \u2013 x\u201d when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions).  c. Instruction Decode Queue (IDQ) delivers four uops.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
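
The IDQ_UOPS_NOT_DELIVERED entries above are all views of one underlying count: per the "4 - x" description, each un-stalled cycle contributes 4 minus the number of uops delivered. The CYCLES_* variants simply apply CounterMask thresholds to that per-cycle value (cmask 1 means at most 3 delivered, up to cmask 4 meaning none), and CYCLES_FE_WAS_OK inverts cmask 1 to count cycles where nothing was missing. A toy illustration of the arithmetic (editor's sketch; the per-cycle trace is invented, and RAT stalls are assumed absent):

#include <stdio.h>

int main(void)
{
        int delivered[] = { 4, 2, 0, 3, 1, 4, 0 };      /* uops per cycle */
        int n = sizeof(delivered) / sizeof(delivered[0]);
        int core = 0, le3 = 0, le2 = 0, le1 = 0, zero = 0, fe_ok = 0;

        for (int i = 0; i < n; i++) {
                int missing = 4 - delivered[i];  /* undelivered slots */
                core += missing;                 /* ...CORE */
                le3  += missing >= 1;            /* cmask 1: <= 3 delivered */
                le2  += missing >= 2;            /* cmask 2: <= 2 delivered */
                le1  += missing >= 3;            /* cmask 3: <= 1 delivered */
                zero += missing >= 4;            /* cmask 4: none delivered */
                fe_ok += missing < 1;            /* Invert + cmask 1 */
        }
        printf("CORE=%d LE_3=%d LE_2=%d LE_1=%d 0_UOPS=%d FE_WAS_OK=%d\n",
               core, le3, le2, le1, zero, fe_ok);
        return 0;
}
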
         "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
         "Counter": "0,1,2,3",
         "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
-        "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
+        "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0\u20132 cycles.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xC6",
         "UMask": "0x1",
-        "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss. Precise Event.",
+        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Precise Event.",
         "PEBS": "1",
-        "MSRValue": "0x11",
+        "MSRValue": "0x400406",
         "Counter": "0,1,2,3",
-        "EventName": "FRONTEND_RETIRED.DSB_MISS",
+        "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
         "MSRIndex": "0x3F7",
-        "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. \r\n",
         "TakenAlone": "1",
         "SampleAfterValue": "100007",
         "CounterHTOff": "0,1,2,3"
     {
         "EventCode": "0xC6",
         "UMask": "0x1",
-        "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss. Precise Event.",
+        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
         "PEBS": "1",
-        "MSRValue": "0x12",
+        "MSRValue": "0x200206",
         "Counter": "0,1,2,3",
-        "EventName": "FRONTEND_RETIRED.L1I_MISS",
+        "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
         "MSRIndex": "0x3F7",
         "TakenAlone": "1",
         "SampleAfterValue": "100007",
     {
         "EventCode": "0xC6",
         "UMask": "0x1",
-        "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss. Precise Event.",
+        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
         "PEBS": "1",
-        "MSRValue": "0x13",
+        "MSRValue": "0x400206",
         "Counter": "0,1,2,3",
-        "EventName": "FRONTEND_RETIRED.L2_MISS",
+        "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
         "MSRIndex": "0x3F7",
         "TakenAlone": "1",
         "SampleAfterValue": "100007",
     {
         "EventCode": "0xC6",
         "UMask": "0x1",
-        "BriefDescription": "Retired Instructions who experienced iTLB true miss. Precise Event.",
+        "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss. Precise Event.",
         "PEBS": "1",
-        "MSRValue": "0x14",
+        "MSRValue": "0x15",
         "Counter": "0,1,2,3",
-        "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+        "EventName": "FRONTEND_RETIRED.STLB_MISS",
         "MSRIndex": "0x3F7",
-        "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+        "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
         "TakenAlone": "1",
         "SampleAfterValue": "100007",
         "CounterHTOff": "0,1,2,3"
     {
         "EventCode": "0xC6",
         "UMask": "0x1",
-        "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss. Precise Event.",
+        "BriefDescription": "Retired Instructions who experienced iTLB true miss. Precise Event.",
         "PEBS": "1",
-        "MSRValue": "0x15",
+        "MSRValue": "0x14",
         "Counter": "0,1,2,3",
-        "EventName": "FRONTEND_RETIRED.STLB_MISS",
+        "EventName": "FRONTEND_RETIRED.ITLB_MISS",
         "MSRIndex": "0x3F7",
-        "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+        "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
         "TakenAlone": "1",
         "SampleAfterValue": "100007",
         "CounterHTOff": "0,1,2,3"
     {
         "EventCode": "0xC6",
         "UMask": "0x1",
-        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
+        "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss. Precise Event.",
         "PEBS": "1",
-        "MSRValue": "0x400206",
+        "MSRValue": "0x13",
         "Counter": "0,1,2,3",
-        "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+        "EventName": "FRONTEND_RETIRED.L2_MISS",
         "MSRIndex": "0x3F7",
         "TakenAlone": "1",
         "SampleAfterValue": "100007",
     {
         "EventCode": "0xC6",
         "UMask": "0x1",
-        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
+        "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss. Precise Event.",
         "PEBS": "1",
-        "MSRValue": "0x200206",
+        "MSRValue": "0x12",
         "Counter": "0,1,2,3",
-        "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
+        "EventName": "FRONTEND_RETIRED.L1I_MISS",
         "MSRIndex": "0x3F7",
         "TakenAlone": "1",
         "SampleAfterValue": "100007",
     {
         "EventCode": "0xC6",
         "UMask": "0x1",
-        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Precise Event.",
+        "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss. Precise Event.",
         "PEBS": "1",
-        "MSRValue": "0x400406",
+        "MSRValue": "0x11",
         "Counter": "0,1,2,3",
-        "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+        "EventName": "FRONTEND_RETIRED.DSB_MISS",
         "MSRIndex": "0x3F7",
+        "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
         "TakenAlone": "1",
         "SampleAfterValue": "100007",
         "CounterHTOff": "0,1,2,3"
     {
         "EventCode": "0xC6",
         "UMask": "0x1",
-        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
         "PEBS": "1",
-        "MSRValue": "0x400806",
+        "MSRValue": "0x300206",
         "Counter": "0,1,2,3",
-        "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+        "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
         "MSRIndex": "0x3F7",
-        "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. \r\n",
         "TakenAlone": "1",
         "SampleAfterValue": "100007",
         "CounterHTOff": "0,1,2,3"
     {
         "EventCode": "0xC6",
         "UMask": "0x1",
-        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall. Precise Event.",
+        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
         "PEBS": "1",
-        "MSRValue": "0x401006",
+        "MSRValue": "0x100206",
         "Counter": "0,1,2,3",
-        "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+        "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
         "MSRIndex": "0x3F7",
-        "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.\r\n",
+        "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
         "TakenAlone": "1",
         "SampleAfterValue": "100007",
         "CounterHTOff": "0,1,2,3"
     {
         "EventCode": "0xC6",
         "UMask": "0x1",
-        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall. Precise Event.",
+        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Precise Event.",
         "PEBS": "1",
-        "MSRValue": "0x402006",
+        "MSRValue": "0x420006",
         "Counter": "0,1,2,3",
-        "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+        "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
         "MSRIndex": "0x3F7",
-        "PublicDescription": "Counts retired instructions that are delivered to the back-end  after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.\r\n",
         "TakenAlone": "1",
         "SampleAfterValue": "100007",
         "CounterHTOff": "0,1,2,3"
     {
         "EventCode": "0xC6",
         "UMask": "0x1",
-        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Precise Event.",
+        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Precise Event.",
         "PEBS": "1",
-        "MSRValue": "0x404006",
+        "MSRValue": "0x410006",
         "Counter": "0,1,2,3",
-        "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+        "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
         "MSRIndex": "0x3F7",
         "TakenAlone": "1",
         "SampleAfterValue": "100007",
     {
         "EventCode": "0xC6",
         "UMask": "0x1",
-        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Precise Event.",
+        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Precise Event.",
         "PEBS": "1",
-        "MSRValue": "0x410006",
+        "MSRValue": "0x404006",
         "Counter": "0,1,2,3",
-        "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+        "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
         "MSRIndex": "0x3F7",
         "TakenAlone": "1",
         "SampleAfterValue": "100007",
     {
         "EventCode": "0xC6",
         "UMask": "0x1",
-        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Precise Event.",
+        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall. Precise Event.",
         "PEBS": "1",
-        "MSRValue": "0x420006",
+        "MSRValue": "0x402006",
         "Counter": "0,1,2,3",
-        "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+        "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
         "MSRIndex": "0x3F7",
+        "PublicDescription": "Counts retired instructions that are delivered to the back-end  after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
         "TakenAlone": "1",
         "SampleAfterValue": "100007",
         "CounterHTOff": "0,1,2,3"
     {
         "EventCode": "0xC6",
         "UMask": "0x1",
-        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
+        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall. Precise Event.",
         "PEBS": "1",
-        "MSRValue": "0x100206",
+        "MSRValue": "0x401006",
         "Counter": "0,1,2,3",
-        "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+        "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
         "MSRIndex": "0x3F7",
-        "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.\r\n",
+        "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
         "TakenAlone": "1",
         "SampleAfterValue": "100007",
         "CounterHTOff": "0,1,2,3"
     {
         "EventCode": "0xC6",
         "UMask": "0x1",
-        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
+        "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
         "PEBS": "1",
-        "MSRValue": "0x300206",
+        "MSRValue": "0x400806",
         "Counter": "0,1,2,3",
-        "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
+        "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
         "MSRIndex": "0x3F7",
+        "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
         "TakenAlone": "1",
         "SampleAfterValue": "100007",
         "CounterHTOff": "0,1,2,3"
index e7f1aa31226dc8d91fc558613b7d143d49eff25b..48a9cdf81307cbdd9c5db76706d067cc8f5e827b 100644
     {
         "EventCode": "0x60",
         "UMask": "0x10",
-        "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
+        "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
-        "CounterMask": "1",
+        "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
+        "CounterMask": "6",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x60",
         "UMask": "0x10",
-        "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
+        "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
         "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
-        "CounterMask": "6",
+        "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
+        "CounterMask": "1",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
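
In the pair of entries above, CounterMask turns an occupancy count (outstanding L3-miss demand data reads in the super queue per cycle) into a cycles-above-threshold count: cmask 1 gives "at least one outstanding", cmask 6 gives "at least six". A hypothetical usage sketch (editor's annotation) opening the GE_6 variant on Linux through perf_event_open(2) with the raw encoding derived from the fields above; the pointer-chasing workload and buffer size are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
        struct perf_event_attr attr;
        long long count = 0;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        /* cmask 6 (bits 24-31) | umask 0x10 (bits 8-15) | event 0x60 */
        attr.config = (6ULL << 24) | (0x10ULL << 8) | 0x60;
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) { perror("perf_event_open"); return 1; }

        /* Pointer-chase a buffer much larger than L3 to force misses. */
        size_t n = (64 * 1024 * 1024) / sizeof(size_t);
        size_t *buf = malloc(n * sizeof(size_t));
        if (!buf) return 1;
        for (size_t i = 0; i < n; i++)
                buf[i] = (i + 4099) % n;        /* large-stride walk */

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        volatile size_t idx = 0;
        for (size_t i = 0; i < n; i++)
                idx = buf[idx];
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        read(fd, &count, sizeof(count));
        printf("cycles with >= 6 outstanding L3-miss demand reads: %lld\n",
               count);
        free(buf);
        close(fd);
        return 0;
}
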
     {
         "EventCode": "0xC8",
         "UMask": "0x4",
-        "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one). ",
+        "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "HLE_RETIRED.ABORTED",
         "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
         "Counter": "0,1,2,3",
         "EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
+        "PublicDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xC9",
         "UMask": "0x4",
-        "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one). ",
+        "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "RTM_RETIRED.ABORTED",
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles.",
+        "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
         "PEBS": "2",
-        "MSRValue": "0x4",
+        "MSRValue": "0x200",
         "Counter": "0,1,2,3",
-        "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+        "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
         "MSRIndex": "0x3F6",
-        "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles.  Reported latency may be longer than just the memory latency.",
+        "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.  Reported latency may be longer than just the memory latency.",
         "TakenAlone": "1",
-        "SampleAfterValue": "100003",
+        "SampleAfterValue": "101",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 8 cycles.",
+        "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
         "PEBS": "2",
-        "MSRValue": "0x8",
+        "MSRValue": "0x100",
         "Counter": "0,1,2,3",
-        "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+        "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
         "MSRIndex": "0x3F6",
-        "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 8 cycles.  Reported latency may be longer than just the memory latency.",
+        "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.  Reported latency may be longer than just the memory latency.",
         "TakenAlone": "1",
-        "SampleAfterValue": "50021",
+        "SampleAfterValue": "503",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 16 cycles.",
+        "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
         "PEBS": "2",
-        "MSRValue": "0x10",
+        "MSRValue": "0x80",
         "Counter": "0,1,2,3",
-        "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+        "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
         "MSRIndex": "0x3F6",
-        "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 16 cycles.  Reported latency may be longer than just the memory latency.",
+        "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.  Reported latency may be longer than just the memory latency.",
         "TakenAlone": "1",
-        "SampleAfterValue": "20011",
+        "SampleAfterValue": "1009",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 32 cycles.",
+        "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
         "PEBS": "2",
-        "MSRValue": "0x20",
+        "MSRValue": "0x40",
         "Counter": "0,1,2,3",
-        "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+        "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
         "MSRIndex": "0x3F6",
-        "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 32 cycles.  Reported latency may be longer than just the memory latency.",
+        "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.  Reported latency may be longer than just the memory latency.",
         "TakenAlone": "1",
-        "SampleAfterValue": "100007",
+        "SampleAfterValue": "2003",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 64 cycles.",
+        "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
         "PEBS": "2",
-        "MSRValue": "0x40",
+        "MSRValue": "0x20",
         "Counter": "0,1,2,3",
-        "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+        "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
         "MSRIndex": "0x3F6",
-        "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 64 cycles.  Reported latency may be longer than just the memory latency.",
+        "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.  Reported latency may be longer than just the memory latency.",
         "TakenAlone": "1",
-        "SampleAfterValue": "2003",
+        "SampleAfterValue": "100007",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 128 cycles.",
+        "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
         "PEBS": "2",
-        "MSRValue": "0x80",
+        "MSRValue": "0x10",
         "Counter": "0,1,2,3",
-        "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+        "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
         "MSRIndex": "0x3F6",
-        "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 128 cycles.  Reported latency may be longer than just the memory latency.",
+        "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.  Reported latency may be longer than just the memory latency.",
         "TakenAlone": "1",
-        "SampleAfterValue": "1009",
+        "SampleAfterValue": "20011",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 256 cycles.",
+        "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
         "PEBS": "2",
-        "MSRValue": "0x100",
+        "MSRValue": "0x8",
         "Counter": "0,1,2,3",
-        "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+        "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
         "MSRIndex": "0x3F6",
-        "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 256 cycles.  Reported latency may be longer than just the memory latency.",
+        "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.  Reported latency may be longer than just the memory latency.",
         "TakenAlone": "1",
-        "SampleAfterValue": "503",
+        "SampleAfterValue": "50021",
         "CounterHTOff": "0,1,2,3"
     },
     {
         "EventCode": "0xCD",
         "UMask": "0x1",
-        "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 512 cycles.",
+        "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
         "PEBS": "2",
-        "MSRValue": "0x200",
+        "MSRValue": "0x4",
         "Counter": "0,1,2,3",
-        "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+        "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
         "MSRIndex": "0x3F6",
-        "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 512 cycles.  Reported latency may be longer than just the memory latency.",
+        "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.  Reported latency may be longer than just the memory latency.",
         "TakenAlone": "1",
-        "SampleAfterValue": "101",
+        "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
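
The MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* entries above program the threshold from MSRValue into MSR 0x3F6 (the MSRIndex shown) and, via the PEBS load-latency facility, tag a random subset of qualifying loads, which is what the new "randomly selected loads" wording reflects. The updated SampleAfterValue figures are primes sized to the expected event rate, so rare high-latency loads (GT_512, period 101) are sampled far more often than ubiquitous GT_4 loads (period 100003). A minimal sketch (editor's annotation) of programming one of these events on Linux, assuming perf's Intel "ldlat" format field, which maps the threshold to perf_event_attr.config1; a full PEBS ring-buffer reader is omitted:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        attr.config = (0x1ULL << 8) | 0xCD;  /* umask 0x1, event 0xCD */
        attr.config1 = 128;                  /* ldlat threshold, cycles */
        attr.sample_period = 1009;           /* SampleAfterValue above */
        attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR |
                           PERF_SAMPLE_WEIGHT;
        attr.precise_ip = 2;                 /* PEBS required here */
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) { perror("perf_event_open"); return 1; }
        puts("load-latency event opened; attach an mmap ring to read samples");
        close(fd);
        return 0;
}
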
     {
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts demand data reads that miss in the L3.",
-        "MSRValue": "0x3fbc000001 ",
+        "BriefDescription": "Counts demand data reads TBD TBD",
+        "MSRValue": "0x3FBC000001",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts demand data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand data reads TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
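
The OFFCORE_RESPONSE.* entries that follow combine a core event (EventCode 0xB7 paired with OFFCORE_RSP MSR 0x1A6, or 0xBB with 0x1A7, per the MSRIndex field) with a transaction filter written to that MSR; the MSRValue is the filter. Under Linux perf the filter travels in perf_event_attr.config1 through the "offcore_rsp" format field, so no direct MSR access is needed. A tiny sketch (editor's annotation) showing the two values for the DEMAND_DATA_RD.L3_MISS.ANY_SNOOP entry above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t config  = (0x1ULL << 8) | 0xB7;   /* event 0xB7, umask 0x1 */
        uint64_t config1 = 0x3FBC000001ULL;        /* MSRValue from the entry */

        printf("perf_event_attr.config  = 0x%llx\n",
               (unsigned long long)config);
        printf("perf_event_attr.config1 = 0x%llx (offcore_rsp)\n",
               (unsigned long long)config1);
        printf("equivalent perf spec: "
               "cpu/event=0xb7,umask=0x1,offcore_rsp=0x3fbc000001/\n");
        return 0;
}
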
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts demand data reads that miss the L3 and clean or shared data is transferred from remote cache.",
-        "MSRValue": "0x083fc00001 ",
+        "BriefDescription": "Counts demand data reads TBD",
+        "MSRValue": "0x083FC00001",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts demand data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand data reads TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts demand data reads that miss the L3 and the modified data is transferred from remote cache.",
-        "MSRValue": "0x103fc00001 ",
+        "BriefDescription": "Counts demand data reads TBD",
+        "MSRValue": "0x103FC00001",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts demand data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand data reads TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local or remote dram.",
-        "MSRValue": "0x063fc00001 ",
+        "BriefDescription": "Counts demand data reads TBD",
+        "MSRValue": "0x063FC00001",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand data reads TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from remote dram.",
-        "MSRValue": "0x063b800001 ",
+        "BriefDescription": "Counts demand data reads TBD",
+        "MSRValue": "0x063B800001",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand data reads TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram.",
-        "MSRValue": "0x0604000001 ",
+        "BriefDescription": "Counts demand data reads TBD",
+        "MSRValue": "0x0604000001",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand data reads TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3.",
-        "MSRValue": "0x3fbc000002 ",
+        "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+        "MSRValue": "0x3FBC000002",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and clean or shared data is transferred from remote cache.",
-        "MSRValue": "0x083fc00002 ",
+        "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+        "MSRValue": "0x083FC00002",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all demand data writes (RFOs) TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache.",
-        "MSRValue": "0x103fc00002 ",
+        "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+        "MSRValue": "0x103FC00002",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all demand data writes (RFOs) TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local or remote dram.",
-        "MSRValue": "0x063fc00002 ",
+        "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+        "MSRValue": "0x063FC00002",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all demand data writes (RFOs) TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from remote dram.",
-        "MSRValue": "0x063b800002 ",
+        "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+        "MSRValue": "0x063B800002",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all demand data writes (RFOs) TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram.",
-        "MSRValue": "0x0604000002 ",
+        "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+        "MSRValue": "0x0604000002",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all demand data writes (RFOs) TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand code reads that miss in the L3.",
-        "MSRValue": "0x3fbc000004 ",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
+        "MSRValue": "0x3FBC000004",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand code reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand code reads that miss the L3 and clean or shared data is transferred from remote cache.",
-        "MSRValue": "0x083fc00004 ",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
+        "MSRValue": "0x083FC00004",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand code reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand code reads that miss the L3 and the modified data is transferred from remote cache.",
-        "MSRValue": "0x103fc00004 ",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
+        "MSRValue": "0x103FC00004",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand code reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local or remote dram.",
-        "MSRValue": "0x063fc00004 ",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
+        "MSRValue": "0x063FC00004",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from remote dram.",
-        "MSRValue": "0x063b800004 ",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
+        "MSRValue": "0x063B800004",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram.",
-        "MSRValue": "0x0604000004 ",
+        "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
+        "MSRValue": "0x0604000004",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3.",
-        "MSRValue": "0x3fbc000010 ",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+        "MSRValue": "0x3FBC000010",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and clean or shared data is transferred from remote cache.",
-        "MSRValue": "0x083fc00010 ",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+        "MSRValue": "0x083FC00010",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the modified data is transferred from remote cache.",
-        "MSRValue": "0x103fc00010 ",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+        "MSRValue": "0x103FC00010",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local or remote dram.",
-        "MSRValue": "0x063fc00010 ",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+        "MSRValue": "0x063FC00010",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from remote dram.",
-        "MSRValue": "0x063b800010 ",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+        "MSRValue": "0x063B800010",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local dram.",
-        "MSRValue": "0x0604000010 ",
+        "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+        "MSRValue": "0x0604000010",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3.",
-        "MSRValue": "0x3fbc000020 ",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+        "MSRValue": "0x3FBC000020",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and clean or shared data is transferred from remote cache.",
-        "MSRValue": "0x083fc00020 ",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+        "MSRValue": "0x083FC00020",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the modified data is transferred from remote cache.",
-        "MSRValue": "0x103fc00020 ",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+        "MSRValue": "0x103FC00020",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local or remote dram.",
-        "MSRValue": "0x063fc00020 ",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+        "MSRValue": "0x063FC00020",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from remote dram.",
-        "MSRValue": "0x063b800020 ",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+        "MSRValue": "0x063B800020",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local dram.",
-        "MSRValue": "0x0604000020 ",
+        "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+        "MSRValue": "0x0604000020",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3.",
-        "MSRValue": "0x3fbc000080 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+        "MSRValue": "0x3FBC000080",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and clean or shared data is transferred from remote cache.",
-        "MSRValue": "0x083fc00080 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+        "MSRValue": "0x083FC00080",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the modified data is transferred from remote cache.",
-        "MSRValue": "0x103fc00080 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+        "MSRValue": "0x103FC00080",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local or remote dram.",
-        "MSRValue": "0x063fc00080 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+        "MSRValue": "0x063FC00080",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from remote dram.",
-        "MSRValue": "0x063b800080 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+        "MSRValue": "0x063B800080",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local dram.",
-        "MSRValue": "0x0604000080 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+        "MSRValue": "0x0604000080",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3.",
-        "MSRValue": "0x3fbc000100 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+        "MSRValue": "0x3FBC000100",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and clean or shared data is transferred from remote cache.",
-        "MSRValue": "0x083fc00100 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+        "MSRValue": "0x083FC00100",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the modified data is transferred from remote cache.",
-        "MSRValue": "0x103fc00100 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+        "MSRValue": "0x103FC00100",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local or remote dram.",
-        "MSRValue": "0x063fc00100 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+        "MSRValue": "0x063FC00100",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from remote dram.",
-        "MSRValue": "0x063b800100 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+        "MSRValue": "0x063B800100",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local dram.",
-        "MSRValue": "0x0604000100 ",
+        "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+        "MSRValue": "0x0604000100",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss in the L3.",
-        "MSRValue": "0x3fbc000400 ",
+        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+        "MSRValue": "0x3FBC000400",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and clean or shared data is transferred from remote cache.",
-        "MSRValue": "0x083fc00400 ",
+        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+        "MSRValue": "0x083FC00400",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the modified data is transferred from remote cache.",
-        "MSRValue": "0x103fc00400 ",
+        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+        "MSRValue": "0x103FC00400",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local or remote dram.",
-        "MSRValue": "0x063fc00400 ",
+        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+        "MSRValue": "0x063FC00400",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from remote dram.",
-        "MSRValue": "0x063b800400 ",
+        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+        "MSRValue": "0x063B800400",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local dram.",
-        "MSRValue": "0x0604000400 ",
+        "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+        "MSRValue": "0x0604000400",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
-        "SampleAfterValue": "100003",
-        "CounterHTOff": "0,1,2,3"
-    },
-    {
-        "Offcore": "1",
-        "EventCode": "0xB7, 0xBB",
-        "UMask": "0x1",
-        "BriefDescription": "Counts any other requests that miss in the L3.",
-        "MSRValue": "0x3fbc008000 ",
-        "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts any other requests that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
-        "SampleAfterValue": "100003",
-        "CounterHTOff": "0,1,2,3"
-    },
-    {
-        "Offcore": "1",
-        "EventCode": "0xB7, 0xBB",
-        "UMask": "0x1",
-        "BriefDescription": "Counts any other requests that miss the L3 and clean or shared data is transferred from remote cache.",
-        "MSRValue": "0x083fc08000 ",
-        "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts any other requests that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
-        "SampleAfterValue": "100003",
-        "CounterHTOff": "0,1,2,3"
-    },
-    {
-        "Offcore": "1",
-        "EventCode": "0xB7, 0xBB",
-        "UMask": "0x1",
-        "BriefDescription": "Counts any other requests that miss the L3 and the modified data is transferred from remote cache.",
-        "MSRValue": "0x103fc08000 ",
-        "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts any other requests that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
-        "SampleAfterValue": "100003",
-        "CounterHTOff": "0,1,2,3"
-    },
-    {
-        "Offcore": "1",
-        "EventCode": "0xB7, 0xBB",
-        "UMask": "0x1",
-        "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from local or remote dram.",
-        "MSRValue": "0x063fc08000 ",
-        "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
-        "SampleAfterValue": "100003",
-        "CounterHTOff": "0,1,2,3"
-    },
-    {
-        "Offcore": "1",
-        "EventCode": "0xB7, 0xBB",
-        "UMask": "0x1",
-        "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from remote dram.",
-        "MSRValue": "0x063b808000 ",
-        "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
-        "SampleAfterValue": "100003",
-        "CounterHTOff": "0,1,2,3"
-    },
-    {
-        "Offcore": "1",
-        "EventCode": "0xB7, 0xBB",
-        "UMask": "0x1",
-        "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from local dram.",
-        "MSRValue": "0x0604008000 ",
-        "Counter": "0,1,2,3",
-        "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch data reads that miss in the L3.",
-        "MSRValue": "0x3fbc000490 ",
+        "BriefDescription": "TBD TBD TBD",
+        "MSRValue": "0x3FBC000490",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache.",
-        "MSRValue": "0x083fc00490 ",
+        "BriefDescription": "TBD TBD",
+        "MSRValue": "0x083FC00490",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch data reads that miss the L3 and the modified data is transferred from remote cache.",
-        "MSRValue": "0x103fc00490 ",
+        "BriefDescription": "TBD TBD",
+        "MSRValue": "0x103FC00490",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local or remote dram.",
-        "MSRValue": "0x063fc00490 ",
+        "BriefDescription": "TBD TBD",
+        "MSRValue": "0x063FC00490",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from remote dram.",
-        "MSRValue": "0x063b800490 ",
+        "BriefDescription": "TBD TBD",
+        "MSRValue": "0x063B800490",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local dram.",
-        "MSRValue": "0x0604000490 ",
+        "BriefDescription": "TBD TBD",
+        "MSRValue": "0x0604000490",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch RFOs that miss in the L3.",
-        "MSRValue": "0x3fbc000120 ",
+        "BriefDescription": "TBD TBD TBD",
+        "MSRValue": "0x3FBC000120",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache.",
-        "MSRValue": "0x083fc00120 ",
+        "BriefDescription": "TBD TBD",
+        "MSRValue": "0x083FC00120",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch RFOs that miss the L3 and the modified data is transferred from remote cache.",
-        "MSRValue": "0x103fc00120 ",
+        "BriefDescription": "TBD TBD",
+        "MSRValue": "0x103FC00120",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local or remote dram.",
-        "MSRValue": "0x063fc00120 ",
+        "BriefDescription": "TBD TBD",
+        "MSRValue": "0x063FC00120",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from remote dram.",
-        "MSRValue": "0x063b800120 ",
+        "BriefDescription": "TBD TBD",
+        "MSRValue": "0x063B800120",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local dram.",
-        "MSRValue": "0x0604000120 ",
+        "BriefDescription": "TBD TBD",
+        "MSRValue": "0x0604000120",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that miss in the L3.",
-        "MSRValue": "0x3fbc000491 ",
+        "BriefDescription": "TBD TBD TBD",
+        "MSRValue": "0x3FBC000491",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache.",
-        "MSRValue": "0x083fc00491 ",
+        "BriefDescription": "TBD TBD",
+        "MSRValue": "0x083FC00491",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache.",
-        "MSRValue": "0x103fc00491 ",
+        "BriefDescription": "TBD TBD",
+        "MSRValue": "0x103FC00491",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local or remote dram.",
-        "MSRValue": "0x063fc00491 ",
+        "BriefDescription": "TBD TBD",
+        "MSRValue": "0x063FC00491",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram.",
-        "MSRValue": "0x063b800491 ",
+        "BriefDescription": "TBD TBD",
+        "MSRValue": "0x063B800491",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram.",
-        "MSRValue": "0x0604000491 ",
+        "BriefDescription": "TBD TBD",
+        "MSRValue": "0x0604000491",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3.",
-        "MSRValue": "0x3fbc000122 ",
+        "BriefDescription": "TBD TBD TBD",
+        "MSRValue": "0x3FBC000122",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_SNOOP",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache.",
-        "MSRValue": "0x083fc00122 ",
+        "BriefDescription": "TBD TBD",
+        "MSRValue": "0x083FC00122",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the modified data is transferred from remote cache.",
-        "MSRValue": "0x103fc00122 ",
+        "BriefDescription": "TBD TBD",
+        "MSRValue": "0x103FC00122",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HITM",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local or remote dram.",
-        "MSRValue": "0x063fc00122 ",
+        "BriefDescription": "TBD TBD",
+        "MSRValue": "0x063FC00122",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from remote dram.",
-        "MSRValue": "0x063b800122 ",
+        "BriefDescription": "TBD TBD",
+        "MSRValue": "0x063B800122",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     },
         "Offcore": "1",
         "EventCode": "0xB7, 0xBB",
         "UMask": "0x1",
-        "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram.",
-        "MSRValue": "0x0604000122 ",
+        "BriefDescription": "TBD TBD",
+        "MSRValue": "0x0604000122",
         "Counter": "0,1,2,3",
         "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
-        "MSRIndex": "0x1a6,0x1a7",
-        "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+        "MSRIndex": "0x1a6, 0x1a7",
+        "PublicDescription": "TBD TBD",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3"
     }
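The offcore-response entries above pair the generic event select (EventCode 0xB7/0xBB) with a 64-bit request/supplier/snoop encoding that perf programs into the MSR pair named by MSRIndex (0x1a6/0x1a7). A minimal sketch of reading one of these JSON tables and listing those encodings; the file path is a placeholder for any of the per-arch event files touched by this diff:

import json

# Placeholder path; substitute one of the JSON event files under
# tools/perf/pmu-events/arch/x86/ touched by this diff.
with open("skylakex/cache.json") as f:
    events = json.load(f)

for ev in events:
    if ev.get("EventName", "").startswith("OFFCORE_RESPONSE."):
        # MSRValue is the bit pattern written to the offcore-response MSR
        # named by MSRIndex before the event is counted.
        print(ev["EventName"], ev["MSRIndex"], ev["MSRValue"])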
index f99f7ae27820c5b9a70d0af942f24863ee0d2f5f..369f56c1d1b5a4445f290517643a40bca95e195e 100644 (file)
@@ -1,6 +1,5 @@
 [
     {
-        "EventCode": "0x00",
         "UMask": "0x1",
         "BriefDescription": "Instructions retired from execution.",
         "Counter": "Fixed counter 0",
@@ -10,7 +9,6 @@
         "CounterHTOff": "Fixed counter 0"
     },
     {
-        "EventCode": "0x00",
         "UMask": "0x2",
         "BriefDescription": "Core cycles when the thread is not in halt state",
         "Counter": "Fixed counter 1",
@@ -20,7 +18,6 @@
         "CounterHTOff": "Fixed counter 1"
     },
     {
-        "EventCode": "0x00",
         "UMask": "0x2",
         "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
         "Counter": "Fixed counter 1",
@@ -30,7 +27,6 @@
         "CounterHTOff": "Fixed counter 1"
     },
     {
-        "EventCode": "0x00",
         "UMask": "0x3",
         "BriefDescription": "Reference cycles when the core is not in halt state.",
         "Counter": "Fixed counter 2",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
+        "Invert": "1",
         "EventCode": "0x0E",
         "UMask": "0x1",
-        "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+        "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
         "Counter": "0,1,2,3",
-        "EventName": "UOPS_ISSUED.ANY",
-        "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+        "EventName": "UOPS_ISSUED.STALL_CYCLES",
+        "CounterMask": "1",
+        "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "Invert": "1",
         "EventCode": "0x0E",
         "UMask": "0x1",
-        "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+        "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
         "Counter": "0,1,2,3",
-        "EventName": "UOPS_ISSUED.STALL_CYCLES",
-        "CounterMask": "1",
-        "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+        "EventName": "UOPS_ISSUED.ANY",
+        "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
         "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
         "Counter": "0,1,2,3",
         "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
-        "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
+        "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to \u201cMixing Intel AVX and Intel SSE Code\u201d section of the Optimization Guide.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x3C",
         "UMask": "0x1",
-        "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+        "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
         "Counter": "0,1,2,3",
-        "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+        "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+        "AnyThread": "1",
         "SampleAfterValue": "2503",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0x3C",
         "UMask": "0x1",
-        "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+        "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
         "Counter": "0,1,2,3",
-        "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
-        "AnyThread": "1",
+        "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
         "SampleAfterValue": "2503",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0x5E",
+        "EventCode": "0x59",
         "UMask": "0x1",
-        "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+        "BriefDescription": "Cycles where the pipeline is stalled due to serializing operations.",
         "Counter": "0,1,2,3",
-        "EventName": "RS_EVENTS.EMPTY_CYCLES",
-        "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
+        "EventName": "PARTIAL_RAT_STALLS.SCOREBOARD",
+        "PublicDescription": "This event counts cycles during which the microcode scoreboard stalls happen.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
+    {
+        "EventCode": "0x5E",
+        "UMask": "0x1",
+        "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+        "Counter": "0,1,2,3",
+        "EventName": "RS_EVENTS.EMPTY_CYCLES",
+        "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
+        "SampleAfterValue": "2000003",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
     {
         "EventCode": "0x87",
         "UMask": "0x1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xA2",
+        "EventCode": "0xa2",
         "UMask": "0x1",
         "BriefDescription": "Resource-related stall cycles",
         "Counter": "0,1,2,3",
         "EventName": "RESOURCE_STALLS.ANY",
-        "PublicDescription": "Counts resource-related stall cycles. Reasons for stalls can be as follows:a. *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots).b. *any* u-arch structure got empty (like INT/SIMD FreeLists).c. FPU control word (FPCW), MXCSR.and others. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+        "PublicDescription": "Counts resource-related stall cycles.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
+    {
+        "EventCode": "0xA8",
+        "UMask": "0x1",
+        "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+        "Counter": "0,1,2,3",
+        "EventName": "LSD.CYCLES_4_UOPS",
+        "CounterMask": "4",
+        "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).",
+        "SampleAfterValue": "2000003",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
     {
         "EventCode": "0xA8",
         "UMask": "0x1",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "EventCode": "0xA8",
+        "EventCode": "0xB1",
         "UMask": "0x1",
-        "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+        "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
         "Counter": "0,1,2,3",
-        "EventName": "LSD.CYCLES_4_UOPS",
+        "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
         "CounterMask": "4",
-        "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).",
+        "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xB1",
         "UMask": "0x1",
-        "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+        "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
         "Counter": "0,1,2,3",
-        "EventName": "UOPS_EXECUTED.THREAD",
-        "PublicDescription": "Number of uops to be executed per-thread each cycle.",
+        "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+        "CounterMask": "3",
+        "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "Invert": "1",
         "EventCode": "0xB1",
         "UMask": "0x1",
-        "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+        "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
         "Counter": "0,1,2,3",
-        "EventName": "UOPS_EXECUTED.STALL_CYCLES",
-        "CounterMask": "1",
-        "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+        "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+        "CounterMask": "2",
+        "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
+        "Invert": "1",
         "EventCode": "0xB1",
         "UMask": "0x1",
-        "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
-        "Counter": "0,1,2,3",
-        "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
-        "CounterMask": "2",
-        "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
-        "SampleAfterValue": "2000003",
-        "CounterHTOff": "0,1,2,3,4,5,6,7"
-    },
-    {
-        "EventCode": "0xB1",
-        "UMask": "0x1",
-        "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+        "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
         "Counter": "0,1,2,3",
-        "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
-        "CounterMask": "3",
-        "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+        "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+        "CounterMask": "1",
+        "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xB1",
         "UMask": "0x1",
-        "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+        "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
         "Counter": "0,1,2,3",
-        "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
-        "CounterMask": "4",
-        "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+        "EventName": "UOPS_EXECUTED.THREAD",
+        "PublicDescription": "Number of uops to be executed per-thread each cycle.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
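The UOPS_EXECUTED.STALL_CYCLES entry above is the same hardware event as UOPS_EXECUTED.THREAD (EventCode 0xB1, UMask 0x1), qualified with CounterMask=1 and Invert=1, i.e. "cycles in which fewer than one uop executed". A hedged sketch of how those fields map onto perf's raw-event syntax (the sleep workload is an arbitrary example):

import subprocess

# cmask/inv in the event string correspond to the CounterMask/Invert
# fields of the JSON entry above.
event = "cpu/event=0xb1,umask=0x1,cmask=1,inv=1/"
subprocess.run(["perf", "stat", "-e", event, "--", "sleep", "1"])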
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
+        "Invert": "1",
         "EventCode": "0xB1",
         "UMask": "0x2",
-        "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+        "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
         "Counter": "0,1,2,3",
-        "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+        "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
         "CounterMask": "1",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     {
         "EventCode": "0xB1",
         "UMask": "0x2",
-        "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+        "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
         "Counter": "0,1,2,3",
-        "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
-        "CounterMask": "2",
+        "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+        "CounterMask": "4",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xB1",
         "UMask": "0x2",
-        "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+        "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
         "Counter": "0,1,2,3",
-        "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
-        "CounterMask": "4",
+        "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+        "CounterMask": "2",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "Invert": "1",
         "EventCode": "0xB1",
         "UMask": "0x2",
-        "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+        "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
         "Counter": "0,1,2,3",
-        "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+        "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
         "CounterMask": "1",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
+        "Invert": "1",
         "EventCode": "0xC2",
         "UMask": "0x2",
-        "BriefDescription": "Retirement slots used.",
+        "BriefDescription": "Cycles with less than 10 actually retired uops.",
         "Counter": "0,1,2,3",
-        "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
-        "PublicDescription": "Counts the retirement slots used.",
+        "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+        "CounterMask": "10",
+        "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
         "Counter": "0,1,2,3",
         "EventName": "UOPS_RETIRED.STALL_CYCLES",
         "CounterMask": "1",
-        "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+        "PublicDescription": "This event counts cycles without actually retired uops.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
-        "Invert": "1",
         "EventCode": "0xC2",
         "UMask": "0x2",
-        "BriefDescription": "Cycles with less than 10 actually retired uops.",
+        "BriefDescription": "Retirement slots used.",
         "Counter": "0,1,2,3",
-        "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
-        "CounterMask": "10",
-        "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+        "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+        "PublicDescription": "Counts the retirement slots used.",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
         "Counter": "0,1,2,3",
         "EventName": "MACHINE_CLEARS.COUNT",
         "CounterMask": "1",
+        "PublicDescription": "Number of machine clears (nukes) of any type.",
         "SampleAfterValue": "100003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xC4",
         "UMask": "0x10",
-        "BriefDescription": "Not taken branch instructions retired.",
+        "BriefDescription": "Counts all not taken macro branch instructions retired.",
+        "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "BR_INST_RETIRED.NOT_TAKEN",
         "Errata": "SKL091",
-        "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+        "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts not taken branch instructions retired.",
         "SampleAfterValue": "400009",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
     {
         "EventCode": "0xC5",
         "UMask": "0x20",
-        "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken. ",
+        "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
         "PEBS": "1",
         "Counter": "0,1,2,3",
         "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
         "SampleAfterValue": "2000003",
         "CounterHTOff": "0,1,2,3,4,5,6,7"
     },
+    {
+        "EventCode": "0xCC",
+        "UMask": "0x40",
+        "BriefDescription": "Number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted). This event is not supported on first SKL and KBL products.",
+        "Counter": "0,1,2,3",
+        "EventName": "ROB_MISC_EVENTS.PAUSE_INST",
+        "SampleAfterValue": "2000003",
+        "CounterHTOff": "0,1,2,3,4,5,6,7"
+    },
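Once this table is built into perf, the new event is addressable by its lowercased EventName. A small illustration (the workload path is a placeholder, not part of this patch):

import subprocess

# Count retired PAUSE instructions in some spin-heavy workload.
subprocess.run(["perf", "stat", "-e", "rob_misc_events.pause_inst",
                "--", "./spin_bench"])  # placeholder workload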
     {
         "EventCode": "0xE6",
         "UMask": "0x1",
index 71e9737f4614dba62fd60d740c42201499a1f480..56e03ba771f48fb2ebecf0a288100751ad068573 100644 (file)
 [
     {
-        "BriefDescription": "Instructions Per Cycle (per logical thread)",
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Frontend_Bound"
+    },
+    {
+        "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Frontend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Bad_Speculation"
+    },
+    {
+        "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Bad_Speculation_SMT"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Backend_Bound"
+    },
+    {
+        "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+        "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Backend_Bound_SMT"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+        "MetricGroup": "TopdownL1",
+        "MetricName": "Retiring"
+    },
+    {
+        "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+        "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved.  Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+        "MetricGroup": "TopdownL1_SMT",
+        "MetricName": "Retiring_SMT"
+    },
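The four TopdownL1 metrics above partition the 4-wide issue slots, so their values sum to roughly 1. A sketch of the non-SMT arithmetic encoded in the MetricExpr fields, with made-up counter values:

# Illustrative (made-up) raw counts; "cycles" is CPU_CLK_UNHALTED.THREAD.
counts = {
    "IDQ_UOPS_NOT_DELIVERED.CORE": 1.2e9,
    "UOPS_ISSUED.ANY": 3.1e9,
    "UOPS_RETIRED.RETIRE_SLOTS": 2.8e9,
    "INT_MISC.RECOVERY_CYCLES": 0.05e9,
    "cycles": 1.5e9,
}

slots = 4 * counts["cycles"]
frontend_bound = counts["IDQ_UOPS_NOT_DELIVERED.CORE"] / slots
bad_speculation = (counts["UOPS_ISSUED.ANY"] - counts["UOPS_RETIRED.RETIRE_SLOTS"]
                   + 4 * counts["INT_MISC.RECOVERY_CYCLES"]) / slots
retiring = counts["UOPS_RETIRED.RETIRE_SLOTS"] / slots
backend_bound = 1 - (frontend_bound + bad_speculation + retiring)

print("Frontend_Bound ", frontend_bound)
print("Bad_Speculation", bad_speculation)
print("Backend_Bound  ", backend_bound)
print("Retiring       ", retiring)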
+    {
         "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Instructions Per Cycle (per logical thread)",
         "MetricGroup": "TopDownL1",
         "MetricName": "IPC"
     },
     {
-        "BriefDescription": "Uops Per Instruction",
         "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
-        "MetricGroup": "Pipeline",
+        "BriefDescription": "Uops Per Instruction",
+        "MetricGroup": "Pipeline;Retiring",
         "MetricName": "UPI"
     },
     {
-        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
-        "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )",
-        "MetricGroup": "Frontend",
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+        "BriefDescription": "Instruction per taken branch",
+        "MetricGroup": "Branches;PGO",
+        "MetricName": "IpTB"
+    },
+    {
+        "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+        "BriefDescription": "Branch instructions per taken branch. ",
+        "MetricGroup": "Branches;PGO",
+        "MetricName": "BpTB"
+    },
+    {
+        "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
+        "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+        "MetricGroup": "PGO",
         "MetricName": "IFetch_Line_Utilization"
     },
     {
-        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
-        "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
-        "MetricGroup": "DSB; Frontend_Bandwidth",
+        "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ))",
+        "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+        "MetricGroup": "DSB;Frontend_Bandwidth",
         "MetricName": "DSB_Coverage"
     },
     {
-        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+        "BriefDescription": "Cycles Per Instruction (threaded)",
         "MetricGroup": "Pipeline;Summary",
         "MetricName": "CPI"
     },
     {
-        "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+        "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
         "MetricGroup": "Summary",
         "MetricName": "CLKS"
     },
     {
-        "BriefDescription": "Total issue-pipeline slots",
-        "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+        "MetricExpr": "4 * cycles",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
         "MetricGroup": "TopDownL1",
         "MetricName": "SLOTS"
     },
     {
-        "BriefDescription": "Total number of retired Instructions",
+        "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Total issue-pipeline slots (per core)",
+        "MetricGroup": "TopDownL1_SMT",
+        "MetricName": "SLOTS_SMT"
+    },
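The *_SMT variants replace plain cycles with an estimate of core clocks: per-thread clocks halved, then scaled up by the fraction of time only one thread was active. A worked sketch with illustrative numbers:

# CPU_CLK_UNHALTED.THREAD / .ONE_THREAD_ACTIVE / .REF_XCLK counts below
# are illustrative, not measured.
clk_thread = 2.0e9
one_thread_active = 0.4e9
ref_xclk = 1.0e9

core_clks = (clk_thread / 2) * (1 + one_thread_active / ref_xclk)
print("CORE_CLKS =", core_clks, " SLOTS_SMT =", 4 * core_clks)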
+    {
+        "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+        "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+        "MetricGroup": "Instruction_Type;L1_Bound",
+        "MetricName": "IpL"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+        "BriefDescription": "Instructions per Store",
+        "MetricGroup": "Instruction_Type;Store_Bound",
+        "MetricName": "IpS"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Instructions per Branch",
+        "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+        "MetricName": "IpB"
+    },
+    {
+        "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+        "BriefDescription": "Instruction per (near) call",
+        "MetricGroup": "Branches",
+        "MetricName": "IpCall"
+    },
+    {
         "MetricExpr": "INST_RETIRED.ANY",
+        "BriefDescription": "Total number of retired Instructions",
         "MetricGroup": "Summary",
         "MetricName": "Instructions"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / cycles",
         "BriefDescription": "Instructions Per Cycle (per physical core)",
-        "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
         "MetricGroup": "SMT",
         "MetricName": "CoreIPC"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Instructions Per Cycle (per physical core)",
+        "MetricGroup": "SMT",
+        "MetricName": "CoreIPC_SMT"
+    },
+    {
+        "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / cycles",
+        "BriefDescription": "Floating Point Operations Per Cycle",
+        "MetricGroup": "FLOPS",
+        "MetricName": "FLOPc"
+    },
+    {
+        "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+        "BriefDescription": "Floating Point Operations Per Cycle",
+        "MetricGroup": "FLOPS_SMT",
+        "MetricName": "FLOPc_SMT"
+    },
+    {
+        "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
         "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
-        "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
         "MetricGroup": "Pipeline;Ports_Utilization",
         "MetricName": "ILP"
     },
     {
-        "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
-        "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL  - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)",
-        "MetricGroup": "Unknown_Branches",
-        "MetricName": "BAClear_Cost"
+        "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+        "MetricGroup": "Branch_Mispredicts",
+        "MetricName": "Branch_Misprediction_Cost"
+    },
+    {
+        "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+        "MetricGroup": "Branch_Mispredicts_SMT",
+        "MetricName": "Branch_Misprediction_Cost_SMT"
     },
     {
+        "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+        "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+        "MetricGroup": "Branch_Mispredicts",
+        "MetricName": "IpMispredict"
+    },
+    {
+        "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
         "BriefDescription": "Core actual clocks when any thread is active on the physical core",
-        "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
         "MetricGroup": "SMT",
         "MetricName": "CORE_CLKS"
     },
     {
-        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
         "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
+        "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
         "MetricGroup": "Memory_Bound;Memory_Lat",
         "MetricName": "Load_Miss_Real_Latency"
     },
     {
-        "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
-        "MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+        "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+        "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
         "MetricGroup": "Memory_Bound;Memory_BW",
         "MetricName": "MLP"
     },
     {
+        "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
         "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
-        "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )",
         "MetricGroup": "TLB",
         "MetricName": "Page_Walks_Utilization"
     },
     {
-        "BriefDescription": "Average CPU Utilization",
+        "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
+        "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+        "MetricGroup": "TLB_SMT",
+        "MetricName": "Page_Walks_Utilization_SMT"
+    },
+    {
+        "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+        "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L1D_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+        "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L2_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+        "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L3_Cache_Fill_BW"
+    },
+    {
+        "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+        "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "L3_Cache_Access_BW"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L1MPKI"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2MPKI"
+    },
+    {
+        "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2MPKI_All"
+    },
+    {
+        "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+        "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L2HPKI_All"
+    },
+    {
+        "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+        "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+        "MetricGroup": "Cache_Misses;",
+        "MetricName": "L3MPKI"
+    },
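All the *PKI metrics above have the same shape: events per thousand retired instructions. Illustrative arithmetic:

# Made-up counts purely to show the MPKI formula.
inst_retired = 50e9
for name, misses in [("L1MPKI", 600e6), ("L2MPKI", 120e6), ("L3MPKI", 15e6)]:
    print(name, 1000 * misses / inst_retired)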
+    {
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+        "BriefDescription": "Average CPU Utilization",
         "MetricGroup": "Summary",
         "MetricName": "CPU_Utilization"
     },
     {
+        "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
         "BriefDescription": "Giga Floating Point Operations Per Second",
-        "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
         "MetricGroup": "FLOPS;Summary",
         "MetricName": "GFLOPs"
     },
     {
-        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Average Frequency Utilization relative nominal frequency",
         "MetricGroup": "Power",
         "MetricName": "Turbo_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+        "BriefDescription": "Fraction of cycles where both hardware threads were active",
         "MetricGroup": "SMT;Summary",
         "MetricName": "SMT_2T_Utilization"
     },
     {
-        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+        "BriefDescription": "Fraction of cycles spent in Kernel mode",
         "MetricGroup": "Summary",
         "MetricName": "Kernel_Utilization"
     },
     {
-        "BriefDescription": "C3 residency percent per core",
+        "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+        "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "DRAM_BW_Use"
+    },
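DRAM_BW_Use multiplies the uncore CAS counts by the 64-byte line size and divides by wall-clock time. A sketch with invented values:

# Invented uncore CAS counts over a one-second window.
cas_reads, cas_writes, seconds = 2.0e9, 0.8e9, 1.0
print("DRAM_BW_Use =", 64 * (cas_reads + cas_writes) / 1e9 / seconds, "GB/s")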
+    {
+        "MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x35\\\\\\,umask\\=0x21@ ) / ( cha_0@event\\=0x0@ / duration_time )",
+        "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
+        "MetricGroup": "Memory_Lat",
+        "MetricName": "DRAM_Read_Latency"
+    },
+    {
+        "MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1@",
+        "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "DRAM_Parallel_Reads"
+    },
+    {
+        "MetricExpr": "( 1000000000 * ( imc@event\\=0xe0\\\\\\,umask\\=0x1@ / imc@event\\=0xe3@ ) / imc_0@event\\=0x0@ ) if 1 if 0 == 1 else 0 else 0",
+        "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches",
+        "MetricGroup": "Memory_Lat",
+        "MetricName": "MEM_PMM_Read_Latency"
+    },
+    {
+        "MetricExpr": "( ( 64 * imc@event\\=0xe3@ / 1000000000 ) / duration_time ) if 1 if 0 == 1 else 0 else 0",
+        "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "PMM_Read_BW"
+    },
+    {
+        "MetricExpr": "( ( 64 * imc@event\\=0xe7@ / 1000000000 ) / duration_time ) if 1 if 0 == 1 else 0 else 0",
+        "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
+        "MetricGroup": "Memory_BW",
+        "MetricName": "PMM_Write_BW"
+    },
+    {
+        "MetricExpr": "cha_0@event\\=0x0@",
+        "BriefDescription": "Socket actual clocks when any core is active on that socket",
+        "MetricGroup": "",
+        "MetricName": "Socket_CLKS"
+    },
+    {
         "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per core",
         "MetricName": "C3_Core_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per core",
         "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per core",
         "MetricName": "C6_Core_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per core",
         "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per core",
         "MetricName": "C7_Core_Residency"
     },
     {
-        "BriefDescription": "C2 residency percent per package",
         "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C2 residency percent per package",
         "MetricName": "C2_Pkg_Residency"
     },
     {
-        "BriefDescription": "C3 residency percent per package",
         "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C3 residency percent per package",
         "MetricName": "C3_Pkg_Residency"
     },
     {
-        "BriefDescription": "C6 residency percent per package",
         "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C6 residency percent per package",
         "MetricName": "C6_Pkg_Residency"
     },
     {
-        "BriefDescription": "C7 residency percent per package",
         "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
         "MetricGroup": "Power",
+        "BriefDescription": "C7 residency percent per package",
         "MetricName": "C7_Pkg_Residency"
     }
 ]
index 390a351d15eada0bb7d19805e54aa8fe60732829..c3eae1d77d366d0819a4b7d39df87099d2342307 100644 (file)
@@ -10,6 +10,8 @@
 # FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 # more details.
 
+from __future__ import print_function
+
 import os
 import sys
 import struct
@@ -199,6 +201,18 @@ import datetime
 
 from PySide.QtSql import *
 
+if sys.version_info < (3, 0):
+       def toserverstr(str):
+               return str
+       def toclientstr(str):
+               return str
+else:
+       # Assume UTF-8 server_encoding and client_encoding
+       def toserverstr(str):
+               return bytes(str, "UTF_8")
+       def toclientstr(str):
+               return bytes(str, "UTF_8")
+
 # Need to access PostgreSQL C library directly to use COPY FROM STDIN
 from ctypes import *
 libpq = CDLL("libpq.so.5")
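The shim above keeps the script bilingual: on Python 2 str is already a byte string, while on Python 3 anything handed to libpq or packed into the PGCOPY stream must be encoded first. A standalone sketch of the behavior:

import sys

if sys.version_info < (3, 0):
    def toserverstr(s):
        return s
else:
    def toserverstr(s):
        return bytes(s, "UTF_8")

# On Python 3 this prints b'dbname = perf'; on Python 2, 'dbname = perf'.
print(repr(toserverstr("dbname = perf")))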
@@ -234,12 +248,17 @@ perf_db_export_mode = True
 perf_db_export_calls = False
 perf_db_export_callchains = False
 
+def printerr(*args, **kw_args):
+       print(*args, file=sys.stderr, **kw_args)
+
+def printdate(*args, **kw_args):
+       print(datetime.datetime.today(), *args, sep=' ', **kw_args)
 
 def usage():
-       print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]"
-       print >> sys.stderr, "where:    columns         'all' or 'branches'"
-       print >> sys.stderr, "          calls           'calls' => create calls and call_paths table"
-       print >> sys.stderr, "          callchains      'callchains' => create call_paths table"
+       printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]")
+       printerr("where:        columns         'all' or 'branches'")
+       printerr("              calls           'calls' => create calls and call_paths table")
+       printerr("              callchains      'callchains' => create call_paths table")
        raise Exception("Too few arguments")
 
 if (len(sys.argv) < 2):
@@ -273,7 +292,7 @@ def do_query(q, s):
                return
        raise Exception("Query failed: " + q.lastError().text())
 
-print datetime.datetime.today(), "Creating database..."
+printdate("Creating database...")
 
 db = QSqlDatabase.addDatabase('QPSQL')
 query = QSqlQuery(db)
@@ -506,12 +525,12 @@ do_query(query, 'CREATE VIEW samples_view AS '
        ' FROM samples')
 
 
-file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
-file_trailer = "\377\377"
+file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0)
+file_trailer = b"\377\377"
 
 def open_output_file(file_name):
        path_name = output_dir_name + "/" + file_name
-       file = open(path_name, "w+")
+       file = open(path_name, "wb+")
        file.write(file_header)
        return file
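The mode change from "w+" to "wb+" follows from the same conversion: the intermediate files carry PGCOPY binary data, and on Python 3 struct.pack() with an "11s" field only accepts bytes. A self-contained sketch:

import struct

header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0)
with open("/tmp/example_table.bin", "wb+") as f:  # placeholder path
    f.write(header)
    f.write(b"\377\377")  # PGCOPY file trailer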
 
@@ -526,13 +545,13 @@ def copy_output_file_direct(file, table_name):
 
 # Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
 def copy_output_file(file, table_name):
-       conn = PQconnectdb("dbname = " + dbname)
+       conn = PQconnectdb(toclientstr("dbname = " + dbname))
        if (PQstatus(conn)):
                raise Exception("COPY FROM STDIN PQconnectdb failed")
        file.write(file_trailer)
        file.seek(0)
        sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
-       res = PQexec(conn, sql)
+       res = PQexec(conn, toclientstr(sql))
        if (PQresultStatus(res) != 4):
                raise Exception("COPY FROM STDIN PQexec failed")
        data = file.read(65536)
@@ -566,7 +585,7 @@ if perf_db_export_calls:
        call_file               = open_output_file("call_table.bin")
 
 def trace_begin():
-       print datetime.datetime.today(), "Writing to intermediate files..."
+       printdate("Writing to intermediate files...")
        # id == 0 means unknown.  It is easier to create records for them than replace the zeroes with NULLs
        evsel_table(0, "unknown")
        machine_table(0, 0, "unknown")
@@ -582,7 +601,7 @@ def trace_begin():
 unhandled_count = 0
 
 def trace_end():
-       print datetime.datetime.today(), "Copying to database..."
+       printdate("Copying to database...")
        copy_output_file(evsel_file,            "selected_events")
        copy_output_file(machine_file,          "machines")
        copy_output_file(thread_file,           "threads")
@@ -597,7 +616,7 @@ def trace_end():
        if perf_db_export_calls:
                copy_output_file(call_file,             "calls")
 
-       print datetime.datetime.today(), "Removing intermediate files..."
+       printdate("Removing intermediate files...")
        remove_output_file(evsel_file)
        remove_output_file(machine_file)
        remove_output_file(thread_file)
@@ -612,7 +631,7 @@ def trace_end():
        if perf_db_export_calls:
                remove_output_file(call_file)
        os.rmdir(output_dir_name)
-       print datetime.datetime.today(), "Adding primary keys"
+       printdate("Adding primary keys")
        do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
        do_query(query, 'ALTER TABLE machines        ADD PRIMARY KEY (id)')
        do_query(query, 'ALTER TABLE threads         ADD PRIMARY KEY (id)')
@@ -627,7 +646,7 @@ def trace_end():
        if perf_db_export_calls:
                do_query(query, 'ALTER TABLE calls           ADD PRIMARY KEY (id)')
 
-       print datetime.datetime.today(), "Adding foreign keys"
+       printdate("Adding foreign keys")
        do_query(query, 'ALTER TABLE threads '
                                        'ADD CONSTRAINT machinefk  FOREIGN KEY (machine_id)   REFERENCES machines   (id),'
                                        'ADD CONSTRAINT processfk  FOREIGN KEY (process_id)   REFERENCES threads    (id)')
@@ -663,8 +682,8 @@ def trace_end():
                do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
 
        if (unhandled_count):
-               print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
-       print datetime.datetime.today(), "Done"
+               printdate("Warning: ", unhandled_count, " unhandled events")
+       printdate("Done")
 
 def trace_unhandled(event_name, context, event_fields_dict):
        global unhandled_count
@@ -674,12 +693,14 @@ def sched__sched_switch(*x):
        pass
 
 def evsel_table(evsel_id, evsel_name, *x):
+       evsel_name = toserverstr(evsel_name)
        n = len(evsel_name)
        fmt = "!hiqi" + str(n) + "s"
        value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
        evsel_file.write(value)
 
 def machine_table(machine_id, pid, root_dir, *x):
+       root_dir = toserverstr(root_dir)
        n = len(root_dir)
        fmt = "!hiqiii" + str(n) + "s"
        value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
@@ -690,6 +711,7 @@ def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
        thread_file.write(value)
 
 def comm_table(comm_id, comm_str, *x):
+       comm_str = toserverstr(comm_str)
        n = len(comm_str)
        fmt = "!hiqi" + str(n) + "s"
        value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
@@ -701,6 +723,9 @@ def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
        comm_thread_file.write(value)
 
 def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
+       short_name = toserverstr(short_name)
+       long_name = toserverstr(long_name)
+       build_id = toserverstr(build_id)
        n1 = len(short_name)
        n2 = len(long_name)
        n3 = len(build_id)
@@ -709,12 +734,14 @@ def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
        dso_file.write(value)
 
 def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
+       symbol_name = toserverstr(symbol_name)
        n = len(symbol_name)
        fmt = "!hiqiqiqiqiii" + str(n) + "s"
        value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
        symbol_file.write(value)
 
 def branch_type_table(branch_type, name, *x):
+       name = toserverstr(name)
        n = len(name)
        fmt = "!hiii" + str(n) + "s"
        value = struct.pack(fmt, 2, 4, branch_type, n, name)
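
The pack format strings in these table writers follow PostgreSQL's binary COPY row layout: a big-endian 16-bit field count, then, for each field, a 32-bit byte length followed by the raw value. A minimal standalone sketch of one evsel row (values illustrative):

    import struct
    name = b"unknown"                  # already server-encoded bytes
    # two fields: (length 8, 64-bit id) and (length n, name bytes)
    row = struct.pack("!hiqi" + str(len(name)) + "s", 2, 8, 0, len(name), name)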
index eb63e6c7107fdb29ea7fee0d6606794186c543f9..bf271fbc3a885f509d78ee60fbb0138fd12df53f 100644 (file)
@@ -10,6 +10,8 @@
 # FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 # more details.
 
+from __future__ import print_function
+
 import os
 import sys
 import struct
@@ -60,11 +62,17 @@ perf_db_export_mode = True
 perf_db_export_calls = False
 perf_db_export_callchains = False
 
+def printerr(*args, **keyword_args):
+       print(*args, file=sys.stderr, **keyword_args)
+
+def printdate(*args, **kw_args):
+       print(datetime.datetime.today(), *args, sep=' ', **kw_args)
+
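With from __future__ import print_function in effect, print is a function on Python 2 as well, so these helpers behave identically on both interpreters; for instance:

    print("Too few arguments", file=sys.stderr)   # what printerr() expands to
    printerr("Too few arguments")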
 def usage():
-       print >> sys.stderr, "Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]"
-       print >> sys.stderr, "where:    columns         'all' or 'branches'"
-       print >> sys.stderr, "          calls           'calls' => create calls and call_paths table"
-       print >> sys.stderr, "          callchains      'callchains' => create call_paths table"
+       printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]")
+       printerr("where:        columns         'all' or 'branches'")
+       printerr("              calls           'calls' => create calls and call_paths table")
+       printerr("              callchains      'callchains' => create call_paths table")
        raise Exception("Too few arguments")
 
 if (len(sys.argv) < 2):
@@ -100,7 +108,7 @@ def do_query_(q):
                return
        raise Exception("Query failed: " + q.lastError().text())
 
-print datetime.datetime.today(), "Creating database..."
+printdate("Creating database ...")
 
 db_exists = False
 try:
@@ -323,7 +331,7 @@ if perf_db_export_calls:
                        'return_id,'
                        'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,'
                        'parent_call_path_id,'
-                       'parent_id'
+                       'calls.parent_id'
                ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
 
 do_query(query, 'CREATE VIEW samples_view AS '
@@ -378,7 +386,7 @@ if perf_db_export_calls:
        call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
 
 def trace_begin():
-       print datetime.datetime.today(), "Writing records..."
+       printdate("Writing records...")
        do_query(query, 'BEGIN TRANSACTION')
        # id == 0 means unknown.  It is easier to create records for them than replace the zeroes with NULLs
        evsel_table(0, "unknown")
@@ -397,14 +405,14 @@ unhandled_count = 0
 def trace_end():
        do_query(query, 'END TRANSACTION')
 
-       print datetime.datetime.today(), "Adding indexes"
+       printdate("Adding indexes")
        if perf_db_export_calls:
                do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
                do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
 
        if (unhandled_count):
-               print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
-       print datetime.datetime.today(), "Done"
+               printdate("Warning: ", unhandled_count, " unhandled events")
+       printdate("Done")
 
 def trace_unhandled(event_name, context, event_fields_dict):
        global unhandled_count
index afec9479ca7fd273a9ddcbcb7de3a823605db8fc..74ef92f1d19ad6637140829497997cc2cf8b686e 100755 (executable)
 #                                                                              7fab593ea956 48 89 15 3b 13 22 00                            movq  %rdx, 0x22133b(%rip)
 # 8107675243232  2    ls       22011  22011  hardware interrupt     No         7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
 
+from __future__ import print_function
+
 import sys
 import weakref
 import threading
 import string
-import cPickle
+try:
+       # Python2
+       import cPickle as pickle
+       # size of pickled integer big enough for record size
+       glb_nsz = 8
+except ImportError:
+       import pickle
+       glb_nsz = 16
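
glb_nsz is the fixed slot reserved for a pickled length prefix: Python 2's highest protocol (2) encodes these small integers in at most 8 bytes, while Python 3's pickle framing overhead needs a larger slot. An illustrative check, not part of the patch, for the record sizes seen here:

    # A pickled record size must fit the slot on either interpreter:
    assert len(pickle.dumps(65535, pickle.HIGHEST_PROTOCOL)) <= glb_nsz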
 import re
 import os
 from PySide.QtCore import *
 from PySide.QtGui import *
 from PySide.QtSql import *
+pyside_version_1 = True
 from decimal import *
 from ctypes import *
 from multiprocessing import Process, Array, Value, Event
 
+# xrange is range in Python3
+try:
+       xrange
+except NameError:
+       xrange = range
+
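With the alias in place, loops written against xrange keep working unchanged, picking up the lazy range object on Python 3; e.g.:

    for i in xrange(3):     # range() on Python 3, true xrange on Python 2
            pass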
+def printerr(*args, **keyword_args):
+       print(*args, file=sys.stderr, **keyword_args)
+
 # Data formatting helpers
 
 def tohex(ip):
@@ -1004,10 +1023,6 @@ class ChildDataItemFinder():
 
 glb_chunk_sz = 10000
 
-# size of pickled integer big enough for record size
-
-glb_nsz = 8
-
 # Background process for SQL data fetcher
 
 class SQLFetcherProcess():
@@ -1066,7 +1081,7 @@ class SQLFetcherProcess():
                                return True
                        if space >= glb_nsz:
                                # Use 0 (or space < glb_nsz) to mean there is no more at the top of the buffer
-                               nd = cPickle.dumps(0, cPickle.HIGHEST_PROTOCOL)
+                               nd = pickle.dumps(0, pickle.HIGHEST_PROTOCOL)
                                self.buffer[self.local_head : self.local_head + len(nd)] = nd
                        self.local_head = 0
                if self.local_tail - self.local_head > sz:
@@ -1084,9 +1099,9 @@ class SQLFetcherProcess():
                        self.wait_event.wait()
 
        def AddToBuffer(self, obj):
-               d = cPickle.dumps(obj, cPickle.HIGHEST_PROTOCOL)
+               d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
                n = len(d)
-               nd = cPickle.dumps(n, cPickle.HIGHEST_PROTOCOL)
+               nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL)
                sz = n + glb_nsz
                self.WaitForSpace(sz)
                pos = self.local_head
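
Each object goes into the shared buffer as a fixed-size pickled length followed by the pickled payload. Because unpickling stops at the STOP opcode, the reader can always load a full glb_nsz-byte slot and ignore any stale trailing bytes, as this standalone sketch shows:

    import pickle
    glb_nsz = 16                                    # Python 3 slot size from above
    payload = pickle.dumps(("sample", 42), pickle.HIGHEST_PROTOCOL)
    slot = pickle.dumps(len(payload), pickle.HIGHEST_PROTOCOL)
    slot += b"\x00" * (glb_nsz - len(slot))         # pad to the fixed slot size
    assert pickle.loads(slot) == len(payload)       # trailing bytes are ignored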
@@ -1198,12 +1213,12 @@ class SQLFetcher(QObject):
                pos = self.local_tail
                if len(self.buffer) - pos < glb_nsz:
                        pos = 0
-               n = cPickle.loads(self.buffer[pos : pos + glb_nsz])
+               n = pickle.loads(self.buffer[pos : pos + glb_nsz])
                if n == 0:
                        pos = 0
-                       n = cPickle.loads(self.buffer[0 : glb_nsz])
+                       n = pickle.loads(self.buffer[0 : glb_nsz])
                pos += glb_nsz
-               obj = cPickle.loads(self.buffer[pos : pos + n])
+               obj = pickle.loads(self.buffer[pos : pos + n])
                self.local_tail = pos + n
                return obj
 
@@ -1512,6 +1527,19 @@ def BranchDataPrep(query):
                        " (" + dsoname(query.value(15)) + ")")
        return data
 
+def BranchDataPrepWA(query):
+       data = []
+       data.append(query.value(0))
+       # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
+       data.append("{:>19}".format(query.value(1)))
+       for i in xrange(2, 8):
+               data.append(query.value(i))
+       data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
+                       " (" + dsoname(query.value(11)) + ")" + " -> " +
+                       tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
+                       " (" + dsoname(query.value(15)) + ")")
+       return data
+
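The workaround rests on PySide v1 mishandling integers wider than a C long under Python 3; formatting the nanosecond timestamp as a right-aligned 19-character string (19 digits cover the full 63-bit range) keeps the values aligned and comparable:

    "{:>19}".format(8107675243232)      # -> '      8107675243232'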
 # Branch data model
 
 class BranchModel(TreeModel):
@@ -1539,7 +1567,11 @@ class BranchModel(TreeModel):
                        " AND evsel_id = " + str(self.event_id) +
                        " ORDER BY samples.id"
                        " LIMIT " + str(glb_chunk_sz))
-               self.fetcher = SQLFetcher(glb, sql, BranchDataPrep, self.AddSample)
+               if pyside_version_1 and sys.version_info[0] == 3:
+                       prep = BranchDataPrepWA
+               else:
+                       prep = BranchDataPrep
+               self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample)
                self.fetcher.done.connect(self.Update)
                self.fetcher.Fetch(glb_chunk_sz)
 
@@ -2065,14 +2097,6 @@ def IsSelectable(db, table, sql = ""):
                return False
        return True
 
-# SQL data preparation
-
-def SQLTableDataPrep(query, count):
-       data = []
-       for i in xrange(count):
-               data.append(query.value(i))
-       return data
-
 # SQL table data model item
 
 class SQLTableItem():
@@ -2096,7 +2120,7 @@ class SQLTableModel(TableModel):
                self.more = True
                self.populated = 0
                self.column_headers = column_headers
-               self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): SQLTableDataPrep(x, y), self.AddSample)
+               self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): self.SQLTableDataPrep(x, y), self.AddSample)
                self.fetcher.done.connect(self.Update)
                self.fetcher.Fetch(glb_chunk_sz)
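
Two details are at work here: the default argument freezes the column count at lambda-creation time, and dispatching through self lets SQLAutoTableModel substitute its own prep method, as it does below. The binding behaviour in isolation:

    count = 3
    prep = lambda row, n=count: row[:n]     # n is captured now, at definition time
    count = 99                              # later rebinding does not affect prep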
 
@@ -2140,6 +2164,12 @@ class SQLTableModel(TableModel):
        def columnHeader(self, column):
                return self.column_headers[column]
 
+       def SQLTableDataPrep(self, query, count):
+               data = []
+               for i in xrange(count):
+                       data.append(query.value(i))
+               return data
+
 # SQL automatic table data model
 
 class SQLAutoTableModel(SQLTableModel):
@@ -2168,8 +2198,32 @@ class SQLAutoTableModel(SQLTableModel):
                        QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
                        while query.next():
                                column_headers.append(query.value(0))
+               if pyside_version_1 and sys.version_info[0] == 3:
+                       if table_name == "samples_view":
+                               self.SQLTableDataPrep = self.samples_view_DataPrep
+                       if table_name == "samples":
+                               self.SQLTableDataPrep = self.samples_DataPrep
                super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent)
 
+       def samples_view_DataPrep(self, query, count):
+               data = []
+               data.append(query.value(0))
+               # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
+               data.append("{:>19}".format(query.value(1)))
+               for i in xrange(2, count):
+                       data.append(query.value(i))
+               return data
+
+       def samples_DataPrep(self, query, count):
+               data = []
+               for i in xrange(9):
+                       data.append(query.value(i))
+               # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
+               data.append("{:>19}".format(query.value(9)))
+               for i in xrange(10, count):
+                       data.append(query.value(i))
+               return data
+
 # Base class for custom ResizeColumnsToContents
 
 class ResizeColumnsToContentsBase(QObject):
@@ -2854,9 +2908,13 @@ class LibXED():
                ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
                if not ok:
                        return 0, ""
+               if sys.version_info[0] == 2:
+                       result = inst.buffer.value
+               else:
+                       result = inst.buffer.value.decode()
                # Return instruction length and the disassembled instruction text
                # For now, assume the length is in byte 166
-               return inst.xedd[166], inst.buffer.value
+               return inst.xedd[166], result
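
ctypes character buffers expose bytes on Python 3, so the disassembly text has to be decoded before use as a str; the same pattern in isolation:

    from ctypes import create_string_buffer
    buf = create_string_buffer(b"mov %rax,%rbx")
    text = buf.value if sys.version_info[0] == 2 else buf.value.decode()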
 
 def TryOpen(file_name):
        try:
@@ -2872,9 +2930,14 @@ def Is64Bit(f):
        header = f.read(7)
        f.seek(pos)
        magic = header[0:4]
-       eclass = ord(header[4])
-       encoding = ord(header[5])
-       version = ord(header[6])
+       if sys.version_info[0] == 2:
+               eclass = ord(header[4])
+               encoding = ord(header[5])
+               version = ord(header[6])
+       else:
+               eclass = header[4]
+               encoding = header[5]
+               version = header[6]
        if magic == chr(127) + "ELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1:
                result = True if eclass == 2 else False
        return result
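
Indexing a bytes object yields integers on Python 3 but one-character strings on Python 2, hence the version split when unpacking the ELF e_ident bytes; for illustration:

    header = b"\x7fELF\x02\x01\x01"     # e_ident prefix of a 64-bit LSB ELF v1
    eclass = header[4] if sys.version_info[0] != 2 else ord(header[4])
    is_64bit = (eclass == 2)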
@@ -2973,7 +3036,7 @@ class DBRef():
 
 def Main():
        if (len(sys.argv) < 2):
-               print >> sys.stderr, "Usage is: exported-sql-viewer.py {<database name> | --help-only}"
+               printerr("Usage is: exported-sql-viewer.py {<database name> | --help-only}")
                raise Exception("Too few arguments")
 
        dbname = sys.argv[1]
@@ -2986,8 +3049,8 @@ def Main():
 
        is_sqlite3 = False
        try:
-               f = open(dbname)
-               if f.read(15) == "SQLite format 3":
+               f = open(dbname, "rb")
+               if f.read(15) == b'SQLite format 3':
                        is_sqlite3 = True
                f.close()
        except:
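
Opening the file in binary mode is what makes read() return bytes comparable with the bytes literal on Python 3; the check in isolation (filename hypothetical):

    with open("perf.db", "rb") as f:
        is_sqlite3 = (f.read(15) == b"SQLite format 3")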
index cb0a3138fa548bb62fa7a0a22bdb9e77f8e84341..93818054ae2086e4818f14231ef9c731d27cb71d 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -C 0 kill >/dev/null 2>&1
+args    = --no-bpf-event -C 0 kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 85a23cf35ba14dda52997598d5c78afb411ae812..b0ca42a5ecc9ce50167e3ef62ef3dbf0a72e46c5 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = kill >/dev/null 2>&1
+args    = --no-bpf-event kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 81f839e2fad019ca43bb5c6be779ab26b79a0afd..1a99b3ce6b899c0730886308ddb08bdbc2a6409a 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -b kill >/dev/null 2>&1
+args    = --no-bpf-event -b kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 357421f4dfcedd07c7e9a98055f2145b8c3c987d..709768b508c624a8965f3a5e62ad7f39e980296a 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -j any kill >/dev/null 2>&1
+args    = --no-bpf-event -j any kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index dbc55f2ab845843a42272374bddfbc256cf58b7f..f943221f782543d84b83a8e67a177a1b7927b51a 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -j any_call kill >/dev/null 2>&1
+args    = --no-bpf-event -j any_call kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index a0824ff8e131d2e38e2ebd2d94bf07f9f7044878..fd4f5b4154a9d389ffb43943ad4447a97fa28b2d 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -j any_ret kill >/dev/null 2>&1
+args    = --no-bpf-event -j any_ret kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index f34d6f120181e40aef44ec175e8dd4af4826b4d8..4e52d685ebe1724224b743043b6c2e034b425b3e 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -j hv kill >/dev/null 2>&1
+args    = --no-bpf-event -j hv kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index b86a352322487e78e362c9cc74af15946f91c247..e08c6ab3796e01d0bbd7867ef59958b1c647066f 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -j ind_call kill >/dev/null 2>&1
+args    = --no-bpf-event -j ind_call kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index d3fbc5e1858a649611ad4d1541bb9f159d7f0e5a..b4b98f84fc2f75cbe7f5b5a9b6d2121a0efd0b00 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -j k kill >/dev/null 2>&1
+args    = --no-bpf-event -j k kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index a318f0dda173669cbf3d07de3b4b1c3b145b8ba9..fb9610edbb0d7ef00c1e4ca65109df5300cc8e4f 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -j u kill >/dev/null 2>&1
+args    = --no-bpf-event -j u kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 34f6cc5772636e11de8a3efbf292c12e23a99f3b..5e9b9019d7865c1d2221f1b9585ae2678022cca3 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -c 123 kill >/dev/null 2>&1
+args    = --no-bpf-event -c 123 kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index a9cf2233b0cef33f5decbdef859823c7339a2455..a99bb13149c20d6042f46f0d668571e98cdbc68f 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -d kill >/dev/null 2>&1
+args    = --no-bpf-event -d kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index bf4cb459f0d5c4a195e5de68a4c9efaf232b2b6a..89e29f6b2ae0cf8cf8670198cc78131f15dd43fe 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -F 100 kill >/dev/null 2>&1
+args    = --no-bpf-event -F 100 kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 0b216e69760cd695786fe132c287ddd451e1570f..5d8234d508452ed3c6263b3df5b69835adc47725 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -g kill >/dev/null 2>&1
+args    = --no-bpf-event -g kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index da2fa73bd0a2801876e8acd9f9fe364909a7508d..ae92061d611ded68ebc072db4745a80a18c4db76 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = --call-graph dwarf -- kill >/dev/null 2>&1
+args    = --no-bpf-event --call-graph dwarf -- kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 625d190bb798e2bce72f306e8b34af9bf4cc3a50..5630521c0b0f3f401004aa65b0f20e033fd3dc22 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = --call-graph fp kill >/dev/null 2>&1
+args    = --no-bpf-event --call-graph fp kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 618ba1c174741c46ea730297383d8a67574be252..14ee60fd3f410f53b131f7bf5f65dd461ffdddc9 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = --group -e cycles,instructions kill >/dev/null 2>&1
+args    = --no-bpf-event --group -e cycles,instructions kill >/dev/null 2>&1
 ret     = 1
 
 [event-1:base-record]
index f0729c454f160bed941b16133b9ac437c973404d..300b9f7e6d6938f9cc04879df757497d09ff26d2 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1
+args    = --no-bpf-event -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1
 ret     = 1
 
 [event-1:base-record]
index 48e8bd12fe4676f166cf74c512f1ec52d9ae93d6..3ffe246e02283970178bc65e87c3e2656299fa0c 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -e '{cycles,instructions}' kill >/dev/null 2>&1
+args    = --no-bpf-event -e '{cycles,instructions}' kill >/dev/null 2>&1
 ret     = 1
 
 [event-1:base-record]
index aa3956d8fe207f7ee28bfa3ebbc03511730212e4..583dcbb078bad826cd2212193654a293f8979f39 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = --no-buffering kill >/dev/null 2>&1
+args    = --no-bpf-event --no-buffering kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 560943decb87cd5ea2fd28a2b5ccc301109f3c7e..15d1dc162e1c3ec754b5dc52efac1a1823ff6164 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -i kill >/dev/null 2>&1
+args    = --no-bpf-event -i kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 8eb73ab639e0a7ba730bdbf2876958ef62fad7de..596fbd6d5a2ccdfd2f9d20bb44fe3acd12b319cf 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -n kill >/dev/null 2>&1
+args    = --no-bpf-event -n kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 69bc748f0f27db32a8a88e08b9a5b62b17632312..119101154c5ee97d0775ad72a822f2d27c794d6d 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -c 100 -P kill >/dev/null 2>&1
+args    = --no-bpf-event -c 100 -P kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index a188a614a44c5827a9ed373ac03129aa25e42b3c..13a5f7860c786c395c3195c17cf508bc7c065d1c 100644 (file)
@@ -1,6 +1,6 @@
 [config]
 command = record
-args    = -R kill >/dev/null 2>&1
+args    = --no-bpf-event -R kill >/dev/null 2>&1
 ret     = 1
 
 [event:base-record]
index 6d598cc071ae23d5f0f9c5210077a7efaff79d7d..1a9c3becf5ffb89d723e7c636e5be3e31c10053c 100644 (file)
@@ -18,7 +18,7 @@ static void testcase(void)
        int i;
 
        for (i = 0; i < NR_ITERS; i++) {
-               char proc_name[10];
+               char proc_name[15];
 
                snprintf(proc_name, sizeof(proc_name), "p:%d\n", i);
                prctl(PR_SET_NAME, proc_name);
index ea7acf403727eb99ec2111218f95059da35e27d4..71f60c0f9faa1fc9fd59e2b7d35b53c7b92d2812 100644 (file)
@@ -85,5 +85,6 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
        if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
                ret = -1;
 
+       perf_evsel__delete(evsel);
        return ret;
 }
index 01f0706995a9737c4bf7ff9012228f6bda8d11fa..9acc1e80b93673c018f81296314f6c4e7363ca70 100644 (file)
@@ -19,7 +19,7 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
        const char *p;
        const char **other;
        double val;
-       int ret;
+       int i, ret;
        struct parse_ctx ctx;
        int num_other;
 
@@ -56,6 +56,9 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
        TEST_ASSERT_VAL("find other", !strcmp(other[1], "BAZ"));
        TEST_ASSERT_VAL("find other", !strcmp(other[2], "BOZO"));
        TEST_ASSERT_VAL("find other", other[3] == NULL);
+
+       for (i = 0; i < num_other; i++)
+               free((void *)other[i]);
        free((void *)other);
 
        return 0;
index c531e6deb104799d733f47fbee405b5dca4b861c..493ecb61154026b0ee777feef5043681aa784973 100644 (file)
@@ -45,7 +45,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
        if (IS_ERR(evsel)) {
                tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
                pr_debug("%s\n", errbuf);
-               goto out_thread_map_delete;
+               goto out_cpu_map_delete;
        }
 
        if (perf_evsel__open(evsel, cpus, threads) < 0) {
@@ -119,6 +119,8 @@ out_close_fd:
        perf_evsel__close_fd(evsel);
 out_evsel_delete:
        perf_evsel__delete(evsel);
+out_cpu_map_delete:
+       cpu_map__put(cpus);
 out_thread_map_delete:
        thread_map__put(threads);
        return err;
index 32bac9c0d6947e68b65a240cd12bab853a8b6bfe..5f5eefcb3c748c13a7fe8a3cd9cd0abba9a1c538 100755 (executable)
@@ -1,15 +1,18 @@
 #!/bin/sh
 # SPDX-License-Identifier: LGPL-2.1
 
-if [ $# -ne 2 ] ; then
+if [ $# -ne 3 ] ; then
        [ $# -eq 1 ] && hostarch=$1 || hostarch=`uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/`
+       linux_header_dir=tools/include/uapi/linux
        header_dir=tools/include/uapi/asm-generic
        arch_header_dir=tools/arch/${hostarch}/include/uapi/asm
 else
-       header_dir=$1
-       arch_header_dir=$2
+       linux_header_dir=$1
+       header_dir=$2
+       arch_header_dir=$3
 fi
 
+linux_mman=${linux_header_dir}/mman.h
 arch_mman=${arch_header_dir}/mman.h
 
 # those in egrep -vw are flags, we want just the bits
@@ -20,6 +23,11 @@ egrep -q $regex ${arch_mman} && \
 (egrep $regex ${arch_mman} | \
        sed -r "s/$regex/\2 \1/g"       | \
        xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
+egrep -q $regex ${linux_mman} && \
+(egrep $regex ${linux_mman} | \
+       egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \
+       sed -r "s/$regex/\2 \1/g"       | \
+       xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
 ([ ! -f ${arch_mman} ] || egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.*' ${arch_mman}) &&
 (egrep $regex ${header_dir}/mman-common.h | \
        egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \
index 6dab340cc506b4db89928d2d6e7144547200b8b1..852d2e271833fde27624b5058d59d1d0b82ef93f 100644 (file)
@@ -2,7 +2,6 @@
 // Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 
 #include "trace/beauty/beauty.h"
-#include <uapi/linux/fs.h>
 
 static size_t renameat2__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix)
 {
diff --git a/tools/perf/trace/strace/groups/string b/tools/perf/trace/strace/groups/string
new file mode 100644 (file)
index 0000000..c87129a
--- /dev/null
@@ -0,0 +1,65 @@
+access
+acct
+add_key
+chdir
+chmod
+chown
+chroot
+creat
+delete_module
+execve
+execveat
+faccessat
+fchmodat
+fchownat
+fgetxattr
+finit_module
+fremovexattr
+fsetxattr
+futimesat
+getxattr
+inotify_add_watch
+lchown
+lgetxattr
+link
+linkat
+listxattr
+llistxattr
+lremovexattr
+lsetxattr
+lstat
+memfd_create
+mkdir
+mkdirat
+mknod
+mknodat
+mq_open
+mq_timedsend
+mq_unlink
+name_to_handle_at
+newfstatat
+open
+openat
+pivot_root
+pwrite64
+quotactl
+readlink
+readlinkat
+removexattr
+rename
+renameat
+renameat2
+request_key
+rmdir
+setxattr
+stat
+statfs
+statx
+swapoff
+swapon
+symlink
+symlinkat
+truncate
+unlink
+unlinkat
+utimensat
index 4f75561424eda58ad554046ec7979924f9c7bc0d..4ad37d8c7d6a16a4ffb158788c82f45174728115 100644 (file)
@@ -611,14 +611,16 @@ void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence)
                browser->top = browser->entries;
                break;
        case SEEK_CUR:
-               browser->top = browser->top + browser->top_idx + offset;
+               browser->top = (char **)browser->top + offset;
                break;
        case SEEK_END:
-               browser->top = browser->top + browser->nr_entries - 1 + offset;
+               browser->top = (char **)browser->entries + browser->nr_entries - 1 + offset;
                break;
        default:
                return;
        }
+       assert((char **)browser->top < (char **)browser->entries + browser->nr_entries);
+       assert((char **)browser->top >= (char **)browser->entries);
 }
 
 unsigned int ui_browser__argv_refresh(struct ui_browser *browser)
@@ -630,7 +632,9 @@ unsigned int ui_browser__argv_refresh(struct ui_browser *browser)
                browser->top = browser->entries;
 
        pos = (char **)browser->top;
-       while (idx < browser->nr_entries) {
+       while (idx < browser->nr_entries &&
+              row < (unsigned)SLtt_Screen_Rows - 1) {
+               assert(pos < (char **)browser->entries + browser->nr_entries);
                if (!browser->filter || !browser->filter(browser, *pos)) {
                        ui_browser__gotorc(browser, row, 0);
                        browser->write(browser, pos, row);
index 8fee56b465027a4776918a538c1d29fa540fa42c..fdf86f7981cab2ca62388733c3d36ac3b64494b8 100644 (file)
@@ -3,6 +3,7 @@ perf-y += hists.o
 perf-y += map.o
 perf-y += scripts.o
 perf-y += header.o
+perf-y += res_sample.o
 
 CFLAGS_annotate.o += -DENABLE_SLFUTURE_CONST
 CFLAGS_hists.o    += -DENABLE_SLFUTURE_CONST
index 35bdfd8b1e7198ece317015d68685b86e2fb943c..98d934a36d86a8bcde59fef7d0ae8d4754f42dc2 100644 (file)
@@ -750,7 +750,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
                        continue;
                case 'r':
                        {
-                               script_browse(NULL);
+                               script_browse(NULL, NULL);
                                continue;
                        }
                case 'k':
index aef800d97ea1879ed8c23e19de80bc027c6bf3d8..3421ecbdd3f046c42e4b5ef578abf11bfb591b64 100644 (file)
@@ -7,6 +7,7 @@
 #include <string.h>
 #include <linux/rbtree.h>
 #include <sys/ttydefaults.h>
+#include <linux/time64.h>
 
 #include "../../util/callchain.h"
 #include "../../util/evsel.h"
@@ -30,6 +31,7 @@
 #include "srcline.h"
 #include "string2.h"
 #include "units.h"
+#include "time-utils.h"
 
 #include "sane_ctype.h"
 
@@ -1224,6 +1226,8 @@ void hist_browser__init_hpp(void)
                                hist_browser__hpp_color_overhead_guest_us;
        perf_hpp__format[PERF_HPP__OVERHEAD_ACC].color =
                                hist_browser__hpp_color_overhead_acc;
+
+       res_sample_init();
 }
 
 static int hist_browser__show_entry(struct hist_browser *browser,
@@ -2338,9 +2342,12 @@ close_file_and_continue:
 }
 
 struct popup_action {
+       unsigned long           time;
        struct thread           *thread;
        struct map_symbol       ms;
        int                     socket;
+       struct perf_evsel       *evsel;
+       enum rstype             rstype;
 
        int (*fn)(struct hist_browser *browser, struct popup_action *act);
 };
@@ -2527,45 +2534,136 @@ static int
 do_run_script(struct hist_browser *browser __maybe_unused,
              struct popup_action *act)
 {
-       char script_opt[64];
-       memset(script_opt, 0, sizeof(script_opt));
+       char *script_opt;
+       int len;
+       int n = 0;
 
+       len = 100;
+       if (act->thread)
+               len += strlen(thread__comm_str(act->thread));
+       else if (act->ms.sym)
+               len += strlen(act->ms.sym->name);
+       script_opt = malloc(len);
+       if (!script_opt)
+               return -1;
+
+       script_opt[0] = 0;
        if (act->thread) {
-               scnprintf(script_opt, sizeof(script_opt), " -c %s ",
+               n = scnprintf(script_opt, len, " -c %s ",
                          thread__comm_str(act->thread));
        } else if (act->ms.sym) {
-               scnprintf(script_opt, sizeof(script_opt), " -S %s ",
+               n = scnprintf(script_opt, len, " -S %s ",
                          act->ms.sym->name);
        }
 
-       script_browse(script_opt);
+       if (act->time) {
+               char start[32], end[32];
+               unsigned long starttime = act->time;
+               unsigned long endtime = act->time + symbol_conf.time_quantum;
+
+               if (starttime == endtime) { /* Display 1ms as fallback */
+                       starttime -= 1*NSEC_PER_MSEC;
+                       endtime += 1*NSEC_PER_MSEC;
+               }
+               timestamp__scnprintf_usec(starttime, start, sizeof start);
+               timestamp__scnprintf_usec(endtime, end, sizeof end);
+               n += snprintf(script_opt + n, len - n, " --time %s,%s", start, end);
+       }
+
+       script_browse(script_opt, act->evsel);
+       free(script_opt);
        return 0;
 }
 
 static int
-add_script_opt(struct hist_browser *browser __maybe_unused,
+do_res_sample_script(struct hist_browser *browser __maybe_unused,
+                    struct popup_action *act)
+{
+       struct hist_entry *he;
+
+       he = hist_browser__selected_entry(browser);
+       res_sample_browse(he->res_samples, he->num_res, act->evsel, act->rstype);
+       return 0;
+}
+
+static int
+add_script_opt_2(struct hist_browser *browser __maybe_unused,
               struct popup_action *act, char **optstr,
-              struct thread *thread, struct symbol *sym)
+              struct thread *thread, struct symbol *sym,
+              struct perf_evsel *evsel, const char *tstr)
 {
+
        if (thread) {
-               if (asprintf(optstr, "Run scripts for samples of thread [%s]",
-                            thread__comm_str(thread)) < 0)
+               if (asprintf(optstr, "Run scripts for samples of thread [%s]%s",
+                            thread__comm_str(thread), tstr) < 0)
                        return 0;
        } else if (sym) {
-               if (asprintf(optstr, "Run scripts for samples of symbol [%s]",
-                            sym->name) < 0)
+               if (asprintf(optstr, "Run scripts for samples of symbol [%s]%s",
+                            sym->name, tstr) < 0)
                        return 0;
        } else {
-               if (asprintf(optstr, "Run scripts for all samples") < 0)
+               if (asprintf(optstr, "Run scripts for all samples%s", tstr) < 0)
                        return 0;
        }
 
        act->thread = thread;
        act->ms.sym = sym;
+       act->evsel = evsel;
        act->fn = do_run_script;
        return 1;
 }
 
+static int
+add_script_opt(struct hist_browser *browser,
+              struct popup_action *act, char **optstr,
+              struct thread *thread, struct symbol *sym,
+              struct perf_evsel *evsel)
+{
+       int n, j;
+       struct hist_entry *he;
+
+       n = add_script_opt_2(browser, act, optstr, thread, sym, evsel, "");
+
+       he = hist_browser__selected_entry(browser);
+       if (sort_order && strstr(sort_order, "time")) {
+               char tstr[128];
+
+               optstr++;
+               act++;
+               j = sprintf(tstr, " in ");
+               j += timestamp__scnprintf_usec(he->time, tstr + j,
+                                              sizeof tstr - j);
+               j += sprintf(tstr + j, "-");
+               timestamp__scnprintf_usec(he->time + symbol_conf.time_quantum,
+                                         tstr + j, sizeof tstr - j);
+               n += add_script_opt_2(browser, act, optstr, thread, sym,
+                                         evsel, tstr);
+               act->time = he->time;
+       }
+       return n;
+}
+
+static int
+add_res_sample_opt(struct hist_browser *browser __maybe_unused,
+                  struct popup_action *act, char **optstr,
+                  struct res_sample *res_sample,
+                  struct perf_evsel *evsel,
+                  enum rstype type)
+{
+       if (!res_sample)
+               return 0;
+
+       if (asprintf(optstr, "Show context for individual samples %s",
+               type == A_ASM ? "with assembler" :
+               type == A_SOURCE ? "with source" : "") < 0)
+               return 0;
+
+       act->fn = do_res_sample_script;
+       act->evsel = evsel;
+       act->rstype = type;
+       return 1;
+}
+
 static int
 do_switch_data(struct hist_browser *browser __maybe_unused,
               struct popup_action *act __maybe_unused)
@@ -3031,7 +3129,7 @@ skip_annotation:
                                nr_options += add_script_opt(browser,
                                                             &actions[nr_options],
                                                             &options[nr_options],
-                                                            thread, NULL);
+                                                            thread, NULL, evsel);
                        }
                        /*
                         * Note that browser->selection != NULL
@@ -3046,11 +3144,24 @@ skip_annotation:
                                nr_options += add_script_opt(browser,
                                                             &actions[nr_options],
                                                             &options[nr_options],
-                                                            NULL, browser->selection->sym);
+                                                            NULL, browser->selection->sym,
+                                                            evsel);
                        }
                }
                nr_options += add_script_opt(browser, &actions[nr_options],
-                                            &options[nr_options], NULL, NULL);
+                                            &options[nr_options], NULL, NULL, evsel);
+               nr_options += add_res_sample_opt(browser, &actions[nr_options],
+                                                &options[nr_options],
+                                hist_browser__selected_entry(browser)->res_samples,
+                                evsel, A_NORMAL);
+               nr_options += add_res_sample_opt(browser, &actions[nr_options],
+                                                &options[nr_options],
+                                hist_browser__selected_entry(browser)->res_samples,
+                                evsel, A_ASM);
+               nr_options += add_res_sample_opt(browser, &actions[nr_options],
+                                                &options[nr_options],
+                                hist_browser__selected_entry(browser)->res_samples,
+                                evsel, A_SOURCE);
                nr_options += add_switch_opt(browser, &actions[nr_options],
                                             &options[nr_options]);
 skip_scripting:
diff --git a/tools/perf/ui/browsers/res_sample.c b/tools/perf/ui/browsers/res_sample.c
new file mode 100644 (file)
index 0000000..c0dd731
--- /dev/null
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Display a menu with individual samples to browse with perf script */
+#include "util.h"
+#include "hist.h"
+#include "evsel.h"
+#include "hists.h"
+#include "sort.h"
+#include "config.h"
+#include "time-utils.h"
+#include <linux/time64.h>
+
+static u64 context_len = 10 * NSEC_PER_MSEC;
+
+static int res_sample_config(const char *var, const char *value, void *data __maybe_unused)
+{
+       if (!strcmp(var, "samples.context"))
+               return perf_config_u64(&context_len, var, value);
+       return 0;
+}
+
+void res_sample_init(void)
+{
+       perf_config(res_sample_config, NULL);
+}
+
+int res_sample_browse(struct res_sample *res_samples, int num_res,
+                     struct perf_evsel *evsel, enum rstype rstype)
+{
+       char **names;
+       int i, n;
+       int choice;
+       char *cmd;
+       char pbuf[256], tidbuf[32], cpubuf[32];
+       const char *perf = perf_exe(pbuf, sizeof pbuf);
+       char trange[128], tsample[64];
+       struct res_sample *r;
+       char extra_format[256];
+
+       names = calloc(num_res, sizeof(char *));
+       if (!names)
+               return -1;
+       for (i = 0; i < num_res; i++) {
+               char tbuf[64];
+
+               timestamp__scnprintf_nsec(res_samples[i].time, tbuf, sizeof tbuf);
+               if (asprintf(&names[i], "%s: CPU %d tid %d", tbuf,
+                            res_samples[i].cpu, res_samples[i].tid) < 0) {
+                       while (--i >= 0)
+                               free(names[i]);
+                       free(names);
+                       return -1;
+               }
+       }
+       choice = ui__popup_menu(num_res, names);
+       for (i = 0; i < num_res; i++)
+               free(names[i]);
+       free(names);
+
+       if (choice < 0 || choice >= num_res)
+               return -1;
+       r = &res_samples[choice];
+
+       n = timestamp__scnprintf_nsec(r->time - context_len, trange, sizeof trange);
+       trange[n++] = ',';
+       timestamp__scnprintf_nsec(r->time + context_len, trange + n, sizeof trange - n);
+
+       timestamp__scnprintf_nsec(r->time, tsample, sizeof tsample);
+
+       attr_to_script(extra_format, &evsel->attr);
+
+       if (asprintf(&cmd, "%s script %s%s --time %s %s%s %s%s --ns %s %s %s %s %s | less +/%s",
+                    perf,
+                    input_name ? "-i " : "",
+                    input_name ? input_name : "",
+                    trange,
+                    r->cpu >= 0 ? "--cpu " : "",
+                    r->cpu >= 0 ? (sprintf(cpubuf, "%d", r->cpu), cpubuf) : "",
+                    r->tid ? "--tid " : "",
+                    r->tid ? (sprintf(tidbuf, "%d", r->tid), tidbuf) : "",
+                    extra_format,
+                    rstype == A_ASM ? "-F +insn --xed" :
+                    rstype == A_SOURCE ? "-F +srcline,+srccode" : "",
+                    symbol_conf.inline_name ? "--inline" : "",
+                    "--show-lost-events ",
+                    r->tid ? "--show-switch-events --show-task-events " : "",
+                    tsample) < 0)
+               return -1;
+       run_script(cmd);
+       free(cmd);
+       return 0;
+}
index 90a32ac69e76c9eeae87b913008874ececb0f366..27cf3ab88d13f894e1e4597eeec4112c4d00fe9e 100644 (file)
@@ -1,34 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0
-#include <elf.h>
-#include <inttypes.h>
-#include <sys/ttydefaults.h>
-#include <string.h>
 #include "../../util/sort.h"
 #include "../../util/util.h"
 #include "../../util/hist.h"
 #include "../../util/debug.h"
 #include "../../util/symbol.h"
 #include "../browser.h"
-#include "../helpline.h"
 #include "../libslang.h"
-
-/* 2048 lines should be enough for a script output */
-#define MAX_LINES              2048
-
-/* 160 bytes for one output line */
-#define AVERAGE_LINE_LEN       160
-
-struct script_line {
-       struct list_head node;
-       char line[AVERAGE_LINE_LEN];
-};
-
-struct perf_script_browser {
-       struct ui_browser b;
-       struct list_head entries;
-       const char *script_name;
-       int nr_lines;
-};
+#include "config.h"
 
 #define SCRIPT_NAMELEN 128
 #define SCRIPT_MAX_NO  64
@@ -40,149 +18,169 @@ struct perf_script_browser {
  */
 #define SCRIPT_FULLPATH_LEN    256
 
+struct script_config {
+       const char **names;
+       char **paths;
+       int index;
+       const char *perf;
+       char extra_format[256];
+};
+
+void attr_to_script(char *extra_format, struct perf_event_attr *attr)
+{
+       extra_format[0] = 0;
+       if (attr->read_format & PERF_FORMAT_GROUP)
+               strcat(extra_format, " -F +metric");
+       if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK)
+               strcat(extra_format, " -F +brstackinsn --xed");
+       if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
+               strcat(extra_format, " -F +iregs");
+       if (attr->sample_type & PERF_SAMPLE_REGS_USER)
+               strcat(extra_format, " -F +uregs");
+       if (attr->sample_type & PERF_SAMPLE_PHYS_ADDR)
+               strcat(extra_format, " -F +phys_addr");
+}
+
+static int add_script_option(const char *name, const char *opt,
+                            struct script_config *c)
+{
+       c->names[c->index] = name;
+       if (asprintf(&c->paths[c->index],
+                    "%s script %s -F +metric %s %s",
+                    c->perf, opt, symbol_conf.inline_name ? " --inline" : "",
+                    c->extra_format) < 0)
+               return -1;
+       c->index++;
+       return 0;
+}
+
+static int scripts_config(const char *var, const char *value, void *data)
+{
+       struct script_config *c = data;
+
+       if (!strstarts(var, "scripts."))
+               return -1;
+       if (c->index >= SCRIPT_MAX_NO)
+               return -1;
+       c->names[c->index] = strdup(var + 7);
+       if (!c->names[c->index])
+               return -1;
+       if (asprintf(&c->paths[c->index], "%s %s", value,
+                    c->extra_format) < 0)
+               return -1;
+       c->index++;
+       return 0;
+}
+
 /*
  * When success, will copy the full path of the selected script
  * into  the buffer pointed by script_name, and return 0.
  * Return -1 on failure.
  */
-static int list_scripts(char *script_name)
+static int list_scripts(char *script_name, bool *custom,
+                       struct perf_evsel *evsel)
 {
-       char *buf, *names[SCRIPT_MAX_NO], *paths[SCRIPT_MAX_NO];
-       int i, num, choice, ret = -1;
+       char *buf, *paths[SCRIPT_MAX_NO], *names[SCRIPT_MAX_NO];
+       int i, num, choice;
+       int ret = 0;
+       int max_std, custom_perf;
+       char pbuf[256];
+       const char *perf = perf_exe(pbuf, sizeof pbuf);
+       struct script_config scriptc = {
+               .names = (const char **)names,
+               .paths = paths,
+               .perf = perf
+       };
+
+       script_name[0] = 0;
 
        /* Preset the script name to SCRIPT_NAMELEN */
        buf = malloc(SCRIPT_MAX_NO * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN));
        if (!buf)
-               return ret;
+               return -1;
 
-       for (i = 0; i < SCRIPT_MAX_NO; i++) {
-               names[i] = buf + i * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN);
+       if (evsel)
+               attr_to_script(scriptc.extra_format, &evsel->attr);
+       add_script_option("Show individual samples", "", &scriptc);
+       add_script_option("Show individual samples with assembler", "-F +insn --xed",
+                         &scriptc);
+       add_script_option("Show individual samples with source", "-F +srcline,+srccode",
+                         &scriptc);
+       perf_config(scripts_config, &scriptc);
+       custom_perf = scriptc.index;
+       add_script_option("Show samples with custom perf script arguments", "", &scriptc);
+       i = scriptc.index;
+       max_std = i;
+
+       for (; i < SCRIPT_MAX_NO; i++) {
+               names[i] = buf + (i - max_std) * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN);
                paths[i] = names[i] + SCRIPT_NAMELEN;
        }
 
-       num = find_scripts(names, paths);
-       if (num > 0) {
-               choice = ui__popup_menu(num, names);
-               if (choice < num && choice >= 0) {
-                       strcpy(script_name, paths[choice]);
-                       ret = 0;
-               }
+       num = find_scripts(names + max_std, paths + max_std, SCRIPT_MAX_NO - max_std,
+                       SCRIPT_FULLPATH_LEN);
+       if (num < 0)
+               num = 0;
+       choice = ui__popup_menu(num + max_std, (char * const *)names);
+       if (choice < 0) {
+               ret = -1;
+               goto out;
        }
+       if (choice == custom_perf) {
+               char script_args[50];
+               int key = ui_browser__input_window("perf script command",
+                               "Enter perf script command line (without perf script prefix)",
+                               script_args, "", 0);
+               if (key != K_ENTER)
+                       return -1;
+               sprintf(script_name, "%s script %s", perf, script_args);
+       } else if (choice < num + max_std) {
+               strcpy(script_name, paths[choice]);
+       }
+       *custom = choice >= max_std;
 
+out:
        free(buf);
+       for (i = 0; i < max_std; i++)
+               free(paths[i]);
        return ret;
 }
 
-static void script_browser__write(struct ui_browser *browser,
-                                  void *entry, int row)
+void run_script(char *cmd)
 {
-       struct script_line *sline = list_entry(entry, struct script_line, node);
-       bool current_entry = ui_browser__is_current_entry(browser, row);
-
-       ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
-                                                      HE_COLORSET_NORMAL);
-
-       ui_browser__write_nstring(browser, sline->line, browser->width);
+       pr_debug("Running %s\n", cmd);
+       SLang_reset_tty();
+       if (system(cmd) < 0)
+               pr_warning("Cannot run %s\n", cmd);
+       /*
+        * SLang doesn't seem to reset the whole terminal, so be more
+        * forceful to get back to the original state.
+        */
+       printf("\033[c\033[H\033[J");
+       fflush(stdout);
+       SLang_init_tty(0, 0, 0);
+       SLsmg_refresh();
 }
 
-static int script_browser__run(struct perf_script_browser *browser)
+int script_browse(const char *script_opt, struct perf_evsel *evsel)
 {
-       int key;
+       char *cmd, script_name[SCRIPT_FULLPATH_LEN];
+       bool custom = false;
 
-       if (ui_browser__show(&browser->b, browser->script_name,
-                            "Press ESC to exit") < 0)
+       memset(script_name, 0, SCRIPT_FULLPATH_LEN);
+       if (list_scripts(script_name, &custom, evsel))
                return -1;
 
-       while (1) {
-               key = ui_browser__run(&browser->b, 0);
-
-               /* We can add some special key handling here if needed */
-               break;
-       }
-
-       ui_browser__hide(&browser->b);
-       return key;
-}
-
-
-int script_browse(const char *script_opt)
-{
-       char cmd[SCRIPT_FULLPATH_LEN*2], script_name[SCRIPT_FULLPATH_LEN];
-       char *line = NULL;
-       size_t len = 0;
-       ssize_t retlen;
-       int ret = -1, nr_entries = 0;
-       FILE *fp;
-       void *buf;
-       struct script_line *sline;
-
-       struct perf_script_browser script = {
-               .b = {
-                       .refresh    = ui_browser__list_head_refresh,
-                       .seek       = ui_browser__list_head_seek,
-                       .write      = script_browser__write,
-               },
-               .script_name = script_name,
-       };
-
-       INIT_LIST_HEAD(&script.entries);
-
-       /* Save each line of the output in one struct script_line object. */
-       buf = zalloc((sizeof(*sline)) * MAX_LINES);
-       if (!buf)
+       if (asprintf(&cmd, "%s%s %s %s%s 2>&1 | less",
+                       custom ? "perf script -s " : "",
+                       script_name,
+                       script_opt ? script_opt : "",
+                       input_name ? "-i " : "",
+                       input_name ? input_name : "") < 0)
                return -1;
-       sline = buf;
-
-       memset(script_name, 0, SCRIPT_FULLPATH_LEN);
-       if (list_scripts(script_name))
-               goto exit;
-
-       sprintf(cmd, "perf script -s %s ", script_name);
 
-       if (script_opt)
-               strcat(cmd, script_opt);
+       run_script(cmd);
+       free(cmd);
 
-       if (input_name) {
-               strcat(cmd, " -i ");
-               strcat(cmd, input_name);
-       }
-
-       strcat(cmd, " 2>&1");
-
-       fp = popen(cmd, "r");
-       if (!fp)
-               goto exit;
-
-       while ((retlen = getline(&line, &len, fp)) != -1) {
-               strncpy(sline->line, line, AVERAGE_LINE_LEN);
-
-               /* If one output line is very large, just cut it short */
-               if (retlen >= AVERAGE_LINE_LEN) {
-                       sline->line[AVERAGE_LINE_LEN - 1] = '\0';
-                       sline->line[AVERAGE_LINE_LEN - 2] = '\n';
-               }
-               list_add_tail(&sline->node, &script.entries);
-
-               if (script.b.width < retlen)
-                       script.b.width = retlen;
-
-               if (nr_entries++ >= MAX_LINES - 1)
-                       break;
-               sline++;
-       }
-
-       if (script.b.width > AVERAGE_LINE_LEN)
-               script.b.width = AVERAGE_LINE_LEN;
-
-       free(line);
-       pclose(fp);
-
-       script.nr_lines = nr_entries;
-       script.b.nr_entries = nr_entries;
-       script.b.entries = &script.entries;
-
-       ret = script_browser__run(&script);
-exit:
-       free(buf);
-       return ret;
+       return 0;
 }
index 5f6dbbf5d74931dd3379e57717bc92acf93e5472..09762985c7137c36e64b68046639f699477b37c0 100644 (file)
 #include <errno.h>
 #include <inttypes.h>
 #include <libgen.h>
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
+#include <linux/btf.h>
 #include "util.h"
 #include "ui/ui.h"
 #include "sort.h"
@@ -24,6 +28,7 @@
 #include "annotate.h"
 #include "evsel.h"
 #include "evlist.h"
+#include "bpf-event.h"
 #include "block-range.h"
 #include "string2.h"
 #include "arch/common.h"
@@ -31,6 +36,7 @@
 #include <pthread.h>
 #include <linux/bitops.h>
 #include <linux/kernel.h>
+#include <bpf/libbpf.h>
 
 /* FIXME: For the HE_COLORSET */
 #include "ui/browser.h"
@@ -1615,6 +1621,9 @@ int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *
                          "  --vmlinux vmlinux\n", build_id_msg ?: "");
        }
                break;
+       case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF:
+               scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation");
+               break;
        default:
                scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
                break;
@@ -1674,6 +1683,156 @@ fallback:
        return 0;
 }
 
+#if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+#define PACKAGE "perf"
+#include <bfd.h>
+#include <dis-asm.h>
+
+static int symbol__disassemble_bpf(struct symbol *sym,
+                                  struct annotate_args *args)
+{
+       struct annotation *notes = symbol__annotation(sym);
+       struct annotation_options *opts = args->options;
+       struct bpf_prog_info_linear *info_linear;
+       struct bpf_prog_linfo *prog_linfo = NULL;
+       struct bpf_prog_info_node *info_node;
+       int len = sym->end - sym->start;
+       disassembler_ftype disassemble;
+       struct map *map = args->ms.map;
+       struct disassemble_info info;
+       struct dso *dso = map->dso;
+       int pc = 0, count, sub_id;
+       struct btf *btf = NULL;
+       char tpath[PATH_MAX];
+       size_t buf_size;
+       int nr_skip = 0;
+       int ret = -1;
+       char *buf;
+       bfd *bfdf;
+       FILE *s;
+
+       if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO)
+               return -1;
+
+       pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__,
+                 sym->name, sym->start, sym->end - sym->start);
+
+       memset(tpath, 0, sizeof(tpath));
+       perf_exe(tpath, sizeof(tpath));
+
+       bfdf = bfd_openr(tpath, NULL);
+       assert(bfdf);
+       assert(bfd_check_format(bfdf, bfd_object));
+
+       s = open_memstream(&buf, &buf_size);
+       if (!s)
+               goto out;
+       init_disassemble_info(&info, s,
+                             (fprintf_ftype) fprintf);
+
+       info.arch = bfd_get_arch(bfdf);
+       info.mach = bfd_get_mach(bfdf);
+
+       info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
+                                                dso->bpf_prog.id);
+       if (!info_node)
+               goto out;
+       info_linear = info_node->info_linear;
+       sub_id = dso->bpf_prog.sub_id;
+
+       info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns);
+       info.buffer_length = info_linear->info.jited_prog_len;
+
+       if (info_linear->info.nr_line_info)
+               prog_linfo = bpf_prog_linfo__new(&info_linear->info);
+
+       if (info_linear->info.btf_id) {
+               struct btf_node *node;
+
+               node = perf_env__find_btf(dso->bpf_prog.env,
+                                         info_linear->info.btf_id);
+               if (node)
+                       btf = btf__new((__u8 *)(node->data),
+                                      node->data_size);
+       }
+
+       disassemble_init_for_target(&info);
+
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
+       disassemble = disassembler(info.arch,
+                                  bfd_big_endian(bfdf),
+                                  info.mach,
+                                  bfdf);
+#else
+       disassemble = disassembler(bfdf);
+#endif
+       assert(disassemble);
+
+       fflush(s);
+       do {
+               const struct bpf_line_info *linfo = NULL;
+               struct disasm_line *dl;
+               size_t prev_buf_size;
+               const char *srcline;
+               u64 addr;
+
+               addr = pc + ((u64 *)(uintptr_t)(info_linear->info.jited_ksyms))[sub_id];
+               count = disassemble(pc, &info);
+
+               if (prog_linfo)
+                       linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
+                                                               addr, sub_id,
+                                                               nr_skip);
+
+               if (linfo && btf) {
+                       srcline = btf__name_by_offset(btf, linfo->line_off);
+                       nr_skip++;
+               } else
+                       srcline = NULL;
+
+               fprintf(s, "\n");
+               prev_buf_size = buf_size;
+               fflush(s);
+
+               if (!opts->hide_src_code && srcline) {
+                       args->offset = -1;
+                       args->line = strdup(srcline);
+                       args->line_nr = 0;
+                       args->ms.sym  = sym;
+                       dl = disasm_line__new(args);
+                       if (dl) {
+                               annotation_line__add(&dl->al,
+                                                    &notes->src->source);
+                       }
+               }
+
+               args->offset = pc;
+               args->line = buf + prev_buf_size;
+               args->line_nr = 0;
+               args->ms.sym  = sym;
+               dl = disasm_line__new(args);
+               if (dl)
+                       annotation_line__add(&dl->al, &notes->src->source);
+
+               pc += count;
+       } while (count > 0 && pc < len);
+
+       ret = 0;
+out:
+       bpf_prog_linfo__free(prog_linfo);
+       btf__free(btf);
+       if (s)
+               fclose(s);
+       bfd_close(bfdf);
+       return ret;
+}
+#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+static int symbol__disassemble_bpf(struct symbol *sym __maybe_unused,
+                                  struct annotate_args *args __maybe_unused)
+{
+       return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF;
+}
+#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
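
The function above leans on the standard libbfd/libopcodes idiom: route every fprintf() the disassembler makes into an open_memstream() buffer, then walk the JITed image one instruction per disassemble() call. A minimal standalone sketch of that idiom, with error handling elided and the raw code/code_len inputs as illustrative assumptions:

    #define PACKAGE "sketch"        /* bfd.h refuses to compile without PACKAGE */
    #include <bfd.h>
    #include <dis-asm.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void disasm_buffer(bfd *abfd, void *code, size_t code_len)
    {
            struct disassemble_info info;
            disassembler_ftype disasm;
            size_t buf_size;
            int pc = 0, count;
            char *buf;
            FILE *s;

            /* every fprintf() from libopcodes lands in buf */
            s = open_memstream(&buf, &buf_size);
            init_disassemble_info(&info, s, (fprintf_ftype) fprintf);
            info.arch = bfd_get_arch(abfd);
            info.mach = bfd_get_mach(abfd);
            info.buffer = code;
            info.buffer_length = code_len;
            disassemble_init_for_target(&info);

    #ifdef DISASM_FOUR_ARGS_SIGNATURE
            disasm = disassembler(info.arch, bfd_big_endian(abfd), info.mach, abfd);
    #else
            disasm = disassembler(abfd);
    #endif

            do {
                    count = disasm(pc, &info);      /* one instruction per call */
                    fprintf(s, "\n");
                    pc += count;
            } while (count > 0 && (size_t)pc < code_len);

            fflush(s);                      /* make buf/buf_size current */
            printf("%s", buf);              /* buf now holds the whole listing */
            fclose(s);
            free(buf);
    }
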
+
 static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
 {
        struct annotation_options *opts = args->options;
@@ -1701,7 +1860,9 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
        pr_debug("annotating [%p] %30s : [%p] %30s\n",
                 dso, dso->long_name, sym, sym->name);
 
-       if (dso__is_kcore(dso)) {
+       if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) {
+               return symbol__disassemble_bpf(sym, args);
+       } else if (dso__is_kcore(dso)) {
                kce.kcore_filename = symfs_filename;
                kce.addr = map__rip_2objdump(map, sym->start);
                kce.offs = sym->start;
index df34fe48316495cce4435ae6f319fe71f87d4384..5bc0cf655d377eb2a352545067e680cb6917c277 100644 (file)
@@ -369,6 +369,7 @@ enum symbol_disassemble_errno {
        __SYMBOL_ANNOTATE_ERRNO__START          = -10000,
 
        SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX       = __SYMBOL_ANNOTATE_ERRNO__START,
+       SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF,
 
        __SYMBOL_ANNOTATE_ERRNO__END,
 };
diff --git a/tools/perf/util/archinsn.h b/tools/perf/util/archinsn.h
new file mode 100644 (file)
index 0000000..448cbb6
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef PERF_ARCHINSN_H
+#define PERF_ARCHINSN_H 1
+
+struct perf_sample;
+struct machine;
+struct thread;
+
+void arch_fetch_insn(struct perf_sample *sample,
+                    struct thread *thread,
+                    struct machine *machine);
+
+#endif /* PERF_ARCHINSN_H */
index 028c8ec1f62a9c347b5c493d3b0caddf74145451..2a4a0da35632feeca4d0da640c3f65be2e62532e 100644 (file)
@@ -3,11 +3,17 @@
 #include <stdlib.h>
 #include <bpf/bpf.h>
 #include <bpf/btf.h>
+#include <bpf/libbpf.h>
 #include <linux/btf.h>
+#include <linux/err.h>
 #include "bpf-event.h"
 #include "debug.h"
 #include "symbol.h"
 #include "machine.h"
+#include "env.h"
+#include "session.h"
+#include "map.h"
+#include "evlist.h"
 
 #define ptr_to_u64(ptr)    ((__u64)(unsigned long)(ptr))
 
@@ -21,15 +27,122 @@ static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
        return ret;
 }
 
+static int machine__process_bpf_event_load(struct machine *machine,
+                                          union perf_event *event,
+                                          struct perf_sample *sample __maybe_unused)
+{
+       struct bpf_prog_info_linear *info_linear;
+       struct bpf_prog_info_node *info_node;
+       struct perf_env *env = machine->env;
+       int id = event->bpf_event.id;
+       unsigned int i;
+
+       /* In perf-record, machine->env is NULL and there is nothing to update */
+       if (env == NULL)
+               return 0;
+
+       info_node = perf_env__find_bpf_prog_info(env, id);
+       if (!info_node)
+               return 0;
+       info_linear = info_node->info_linear;
+
+       for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) {
+               u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
+               u64 addr = addrs[i];
+               struct map *map;
+
+               map = map_groups__find(&machine->kmaps, addr);
+
+               if (map) {
+                       map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
+                       map->dso->bpf_prog.id = id;
+                       map->dso->bpf_prog.sub_id = i;
+                       map->dso->bpf_prog.env = env;
+               }
+       }
+       return 0;
+}
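
Tagging the dso as DSO_BINARY_TYPE__BPF_PROG_INFO here is what later steers symbol__disassemble() into symbol__disassemble_bpf() above, and the saved id/sub_id/env triple is exactly what that function needs to re-find the JITed image and its BTF.
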
+
 int machine__process_bpf_event(struct machine *machine __maybe_unused,
                               union perf_event *event,
                               struct perf_sample *sample __maybe_unused)
 {
        if (dump_trace)
                perf_event__fprintf_bpf_event(event, stdout);
+
+       switch (event->bpf_event.type) {
+       case PERF_BPF_EVENT_PROG_LOAD:
+               return machine__process_bpf_event_load(machine, event, sample);
+
+       case PERF_BPF_EVENT_PROG_UNLOAD:
+               /*
+                * Do not free the bpf_prog_info and btf of the program
+                * here: the annotation code still needs them. They will
+                * be freed at the end of the session.
+                */
+               break;
+       default:
+               pr_debug("unexpected bpf_event type of %d\n",
+                        event->bpf_event.type);
+               break;
+       }
        return 0;
 }
 
+static int perf_env__fetch_btf(struct perf_env *env,
+                              u32 btf_id,
+                              struct btf *btf)
+{
+       struct btf_node *node;
+       u32 data_size;
+       const void *data;
+
+       data = btf__get_raw_data(btf, &data_size);
+
+       node = malloc(data_size + sizeof(struct btf_node));
+       if (!node)
+               return -1;
+
+       node->id = btf_id;
+       node->data_size = data_size;
+       memcpy(node->data, data, data_size);
+
+       perf_env__insert_btf(env, node);
+       return 0;
+}
+
+static int synthesize_bpf_prog_name(char *buf, int size,
+                                   struct bpf_prog_info *info,
+                                   struct btf *btf,
+                                   u32 sub_id)
+{
+       u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags);
+       void *func_infos = (void *)(uintptr_t)(info->func_info);
+       u32 sub_prog_cnt = info->nr_jited_ksyms;
+       const struct bpf_func_info *finfo;
+       const char *short_name = NULL;
+       const struct btf_type *t;
+       int name_len;
+
+       name_len = snprintf(buf, size, "bpf_prog_");
+       name_len += snprintf_hex(buf + name_len, size - name_len,
+                                prog_tags[sub_id], BPF_TAG_SIZE);
+       if (btf) {
+               finfo = func_infos + sub_id * info->func_info_rec_size;
+               t = btf__type_by_id(btf, finfo->type_id);
+               short_name = btf__name_by_offset(btf, t->name_off);
+       } else if (sub_id == 0 && sub_prog_cnt == 1) {
+               /* no subprog */
+               if (info->name[0])
+                       short_name = info->name;
+       } else
+               short_name = "F";
+       if (short_name)
+               name_len += snprintf(buf + name_len, size - name_len,
+                                    "_%s", short_name);
+       return name_len;
+}
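
With BTF present, the synthesized symbol is the hex program tag followed by the BTF function name, e.g. bpf_prog_7cc47bbf07148bfe_sys_enter (tag value hypothetical); a single-subprogram load without BTF falls back to the name in bpf_prog_info, and a multi-subprogram load without BTF degrades to the literal suffix, bpf_prog_<tag>_F.
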
+
 /*
  * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
  * program. One PERF_RECORD_BPF_EVENT is generated for the program. And
@@ -40,7 +153,7 @@ int machine__process_bpf_event(struct machine *machine __maybe_unused,
  *   -1 for failures;
  *   -2 for lack of kernel support.
  */
-static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
+static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
                                               perf_event__handler_t process,
                                               struct machine *machine,
                                               int fd,
@@ -49,102 +162,71 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
 {
        struct ksymbol_event *ksymbol_event = &event->ksymbol_event;
        struct bpf_event *bpf_event = &event->bpf_event;
-       u32 sub_prog_cnt, i, func_info_rec_size = 0;
-       u8 (*prog_tags)[BPF_TAG_SIZE] = NULL;
-       struct bpf_prog_info info = { .type = 0, };
-       u32 info_len = sizeof(info);
-       void *func_infos = NULL;
-       u64 *prog_addrs = NULL;
+       struct bpf_prog_info_linear *info_linear;
+       struct perf_tool *tool = session->tool;
+       struct bpf_prog_info_node *info_node;
+       struct bpf_prog_info *info;
        struct btf *btf = NULL;
-       u32 *prog_lens = NULL;
-       bool has_btf = false;
-       char errbuf[512];
+       struct perf_env *env;
+       u32 sub_prog_cnt, i;
        int err = 0;
+       u64 arrays;
+
+       /*
+        * For perf-record and perf-report, use header.env;
+        * otherwise, use the global perf_env.
+        */
+       env = session->data ? &session->header.env : &perf_env;
 
-       /* Call bpf_obj_get_info_by_fd() to get sizes of arrays */
-       err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+       arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
+       arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
+       arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
+       arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
+       arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
+       arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
+       arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
 
-       if (err) {
-               pr_debug("%s: failed to get BPF program info: %s, aborting\n",
-                        __func__, str_error_r(errno, errbuf, sizeof(errbuf)));
+       info_linear = bpf_program__get_prog_info_linear(fd, arrays);
+       if (IS_ERR_OR_NULL(info_linear)) {
+               info_linear = NULL;
+               pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
                return -1;
        }
-       if (info_len < offsetof(struct bpf_prog_info, prog_tags)) {
+
+       if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
                pr_debug("%s: the kernel is too old, aborting\n", __func__);
-               return -2;
+               free(info_linear);
+               return -2;
        }
 
+       info = &info_linear->info;
+
        /* number of ksyms, func_lengths, and tags should match */
-       sub_prog_cnt = info.nr_jited_ksyms;
-       if (sub_prog_cnt != info.nr_prog_tags ||
-           sub_prog_cnt != info.nr_jited_func_lens)
+       sub_prog_cnt = info->nr_jited_ksyms;
+       if (sub_prog_cnt != info->nr_prog_tags ||
+           sub_prog_cnt != info->nr_jited_func_lens)
                return -1;
 
        /* check BTF func info support */
-       if (info.btf_id && info.nr_func_info && info.func_info_rec_size) {
+       if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
                /* btf func info number should be same as sub_prog_cnt */
-               if (sub_prog_cnt != info.nr_func_info) {
+               if (sub_prog_cnt != info->nr_func_info) {
                        pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
-                       return -1;
-               }
-               if (btf__get_from_id(info.btf_id, &btf)) {
-                       pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info.btf_id);
-                       return -1;
+                       err = -1;
+                       goto out;
                }
-               func_info_rec_size = info.func_info_rec_size;
-               func_infos = calloc(sub_prog_cnt, func_info_rec_size);
-               if (!func_infos) {
-                       pr_debug("%s: failed to allocate memory for func_infos, aborting\n", __func__);
-                       return -1;
+               if (btf__get_from_id(info->btf_id, &btf)) {
+                       pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
+                       err = -1;
+                       btf = NULL;
+                       goto out;
                }
-               has_btf = true;
-       }
-
-       /*
-        * We need address, length, and tag for each sub program.
-        * Allocate memory and call bpf_obj_get_info_by_fd() again
-        */
-       prog_addrs = calloc(sub_prog_cnt, sizeof(u64));
-       if (!prog_addrs) {
-               pr_debug("%s: failed to allocate memory for prog_addrs, aborting\n", __func__);
-               goto out;
-       }
-       prog_lens = calloc(sub_prog_cnt, sizeof(u32));
-       if (!prog_lens) {
-               pr_debug("%s: failed to allocate memory for prog_lens, aborting\n", __func__);
-               goto out;
-       }
-       prog_tags = calloc(sub_prog_cnt, BPF_TAG_SIZE);
-       if (!prog_tags) {
-               pr_debug("%s: failed to allocate memory for prog_tags, aborting\n", __func__);
-               goto out;
-       }
-
-       memset(&info, 0, sizeof(info));
-       info.nr_jited_ksyms = sub_prog_cnt;
-       info.nr_jited_func_lens = sub_prog_cnt;
-       info.nr_prog_tags = sub_prog_cnt;
-       info.jited_ksyms = ptr_to_u64(prog_addrs);
-       info.jited_func_lens = ptr_to_u64(prog_lens);
-       info.prog_tags = ptr_to_u64(prog_tags);
-       info_len = sizeof(info);
-       if (has_btf) {
-               info.nr_func_info = sub_prog_cnt;
-               info.func_info_rec_size = func_info_rec_size;
-               info.func_info = ptr_to_u64(func_infos);
-       }
-
-       err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
-       if (err) {
-               pr_debug("%s: failed to get BPF program info, aborting\n", __func__);
-               goto out;
+               perf_env__fetch_btf(env, info->btf_id, btf);
        }
 
        /* Synthesize PERF_RECORD_KSYMBOL */
        for (i = 0; i < sub_prog_cnt; i++) {
-               const struct bpf_func_info *finfo;
-               const char *short_name = NULL;
-               const struct btf_type *t;
+               __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
+               __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
                int name_len;
 
                *ksymbol_event = (struct ksymbol_event){
@@ -157,26 +239,9 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
                        .ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
                        .flags = 0,
                };
-               name_len = snprintf(ksymbol_event->name, KSYM_NAME_LEN,
-                                   "bpf_prog_");
-               name_len += snprintf_hex(ksymbol_event->name + name_len,
-                                        KSYM_NAME_LEN - name_len,
-                                        prog_tags[i], BPF_TAG_SIZE);
-               if (has_btf) {
-                       finfo = func_infos + i * info.func_info_rec_size;
-                       t = btf__type_by_id(btf, finfo->type_id);
-                       short_name = btf__name_by_offset(btf, t->name_off);
-               } else if (i == 0 && sub_prog_cnt == 1) {
-                       /* no subprog */
-                       if (info.name[0])
-                               short_name = info.name;
-               } else
-                       short_name = "F";
-               if (short_name)
-                       name_len += snprintf(ksymbol_event->name + name_len,
-                                            KSYM_NAME_LEN - name_len,
-                                            "_%s", short_name);
 
+               name_len = synthesize_bpf_prog_name(ksymbol_event->name,
+                                                   KSYM_NAME_LEN, info, btf, i);
                ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
                                                         sizeof(u64));
 
@@ -186,8 +251,8 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
                                                     machine, process);
        }
 
-       /* Synthesize PERF_RECORD_BPF_EVENT */
-       if (opts->bpf_event) {
+       if (!opts->no_bpf_event) {
+               /* Synthesize PERF_RECORD_BPF_EVENT */
                *bpf_event = (struct bpf_event){
                        .header = {
                                .type = PERF_RECORD_BPF_EVENT,
@@ -195,25 +260,38 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
                        },
                        .type = PERF_BPF_EVENT_PROG_LOAD,
                        .flags = 0,
-                       .id = info.id,
+                       .id = info->id,
                };
-               memcpy(bpf_event->tag, prog_tags[i], BPF_TAG_SIZE);
+               memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
                memset((void *)event + event->header.size, 0, machine->id_hdr_size);
                event->header.size += machine->id_hdr_size;
+
+               /* save bpf_prog_info to env */
+               info_node = malloc(sizeof(struct bpf_prog_info_node));
+               if (!info_node) {
+                       err = -1;
+                       goto out;
+               }
+
+               info_node->info_linear = info_linear;
+               perf_env__insert_bpf_prog_info(env, info_node);
+               info_linear = NULL;
+
+               /*
+                * Process the event after saving bpf_prog_info to env,
+                * so that the required information is ready for lookup.
+                */
                err = perf_tool__process_synth_event(tool, event,
                                                     machine, process);
        }
 
 out:
-       free(prog_tags);
-       free(prog_lens);
-       free(prog_addrs);
-       free(func_infos);
+       free(info_linear);
        free(btf);
        return err ? -1 : 0;
 }
 
-int perf_event__synthesize_bpf_events(struct perf_tool *tool,
+int perf_event__synthesize_bpf_events(struct perf_session *session,
                                      perf_event__handler_t process,
                                      struct machine *machine,
                                      struct record_opts *opts)
@@ -247,7 +325,7 @@ int perf_event__synthesize_bpf_events(struct perf_tool *tool,
                        continue;
                }
 
-               err = perf_event__synthesize_one_bpf_prog(tool, process,
+               err = perf_event__synthesize_one_bpf_prog(session, process,
                                                          machine, fd,
                                                          event, opts);
                close(fd);
@@ -261,3 +339,142 @@ int perf_event__synthesize_bpf_events(struct perf_tool *tool,
        free(event);
        return err;
 }
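
Both perf_event__synthesize_one_bpf_prog() above and perf_env__add_bpf_info() below build the same `arrays` bitmask so that one libbpf call sizes, allocates and fills all the requested variable-length bpf_prog_info arrays. A minimal sketch of that pattern, assuming a valid program fd and requesting only two arrays for brevity:

    #include <bpf/libbpf.h>
    #include <linux/err.h>

    static struct bpf_prog_info_linear *fetch_info(int fd)
    {
            /* each bit asks libbpf to fetch one variable-length array */
            __u64 arrays = (1UL << BPF_PROG_INFO_JITED_KSYMS) |
                           (1UL << BPF_PROG_INFO_FUNC_INFO);
            struct bpf_prog_info_linear *info_linear;

            info_linear = bpf_program__get_prog_info_linear(fd, arrays);
            if (IS_ERR_OR_NULL(info_linear))
                    return NULL;

            /*
             * The arrays live inside the same allocation as the struct,
             * so a single free(info_linear) releases everything.
             */
            return info_linear;
    }
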
+
+static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
+{
+       struct bpf_prog_info_linear *info_linear;
+       struct bpf_prog_info_node *info_node;
+       struct btf *btf = NULL;
+       u64 arrays;
+       u32 btf_id;
+       int fd;
+
+       fd = bpf_prog_get_fd_by_id(id);
+       if (fd < 0)
+               return;
+
+       arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
+       arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
+       arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
+       arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
+       arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
+       arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
+       arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
+
+       info_linear = bpf_program__get_prog_info_linear(fd, arrays);
+       if (IS_ERR_OR_NULL(info_linear)) {
+               pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
+               goto out;
+       }
+
+       btf_id = info_linear->info.btf_id;
+
+       info_node = malloc(sizeof(struct bpf_prog_info_node));
+       if (info_node) {
+               info_node->info_linear = info_linear;
+               perf_env__insert_bpf_prog_info(env, info_node);
+       } else
+               free(info_linear);
+
+       if (btf_id == 0)
+               goto out;
+
+       if (btf__get_from_id(btf_id, &btf)) {
+               pr_debug("%s: failed to get BTF of id %u, aborting\n",
+                        __func__, btf_id);
+               goto out;
+       }
+       perf_env__fetch_btf(env, btf_id, btf);
+
+out:
+       free(btf);
+       close(fd);
+}
+
+static int bpf_event__sb_cb(union perf_event *event, void *data)
+{
+       struct perf_env *env = data;
+
+       if (event->header.type != PERF_RECORD_BPF_EVENT)
+               return -1;
+
+       switch (event->bpf_event.type) {
+       case PERF_BPF_EVENT_PROG_LOAD:
+               perf_env__add_bpf_info(env, event->bpf_event.id);
+               /* fall through */
+
+       case PERF_BPF_EVENT_PROG_UNLOAD:
+               /*
+                * Do not free the bpf_prog_info and btf of the program
+                * here: the annotation code still needs them. They will
+                * be freed at the end of the session.
+                */
+               break;
+       default:
+               pr_debug("unexpected bpf_event type of %d\n",
+                        event->bpf_event.type);
+               break;
+       }
+
+       return 0;
+}
+
+int bpf_event__add_sb_event(struct perf_evlist **evlist,
+                           struct perf_env *env)
+{
+       struct perf_event_attr attr = {
+               .type             = PERF_TYPE_SOFTWARE,
+               .config           = PERF_COUNT_SW_DUMMY,
+               .sample_id_all    = 1,
+               .watermark        = 1,
+               .bpf_event        = 1,
+               .size             = sizeof(attr), /* to capture ABI version */
+       };
+
+       /*
+        * Older gcc versions don't support designated initializers for
+        * unnamed union members (like the one below), so set it
+        * separately:
+        */
+       attr.wakeup_watermark = 1;
+
+       return perf_evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
+}
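
The dummy software event above never produces samples of its own: with .bpf_event = 1 it exists purely to receive PERF_RECORD_BPF_EVENT side-band records, and watermark = 1 together with wakeup_watermark = 1 makes the kernel wake the polling thread as soon as any data is queued, so newly loaded programs are picked up promptly.
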
+
+void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+                                   struct perf_env *env,
+                                   FILE *fp)
+{
+       __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
+       __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
+       char name[KSYM_NAME_LEN];
+       struct btf *btf = NULL;
+       u32 sub_prog_cnt, i;
+
+       sub_prog_cnt = info->nr_jited_ksyms;
+       if (sub_prog_cnt != info->nr_prog_tags ||
+           sub_prog_cnt != info->nr_jited_func_lens)
+               return;
+
+       if (info->btf_id) {
+               struct btf_node *node;
+
+               node = perf_env__find_btf(env, info->btf_id);
+               if (node)
+                       btf = btf__new((__u8 *)(node->data),
+                                      node->data_size);
+       }
+
+       if (sub_prog_cnt == 1) {
+               synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0);
+               fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n",
+                       info->id, name, prog_addrs[0], prog_lens[0]);
+               return;
+       }
+
+       fprintf(fp, "# bpf_prog_info %u:\n", info->id);
+       for (i = 0; i < sub_prog_cnt; i++) {
+               synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i);
+
+               fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n",
+                       i, name, prog_addrs[i], prog_lens[i]);
+       }
+}
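
Given the formats above, the resulting header dump looks like the following (id, tag, address and size values are hypothetical):

    # bpf_prog_info 17: bpf_prog_7cc47bbf07148bfe_sys_enter addr 0xffffffffc02e8ef1 size 229
    # bpf_prog_info 18:
    # 	sub_prog 0: bpf_prog_b07a12e4a9e7a851_func_a addr 0xffffffffc02ea000 size 120
    # 	sub_prog 1: bpf_prog_04fd1f55e1f39a85_func_b addr 0xffffffffc02ea100 size 96
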
index 7890067e1a3781a1cdb629915eab5e3bc6f46b33..04c33b3bfe281055292f512812972babaf91589d 100644 (file)
@@ -3,22 +3,45 @@
 #define __PERF_BPF_EVENT_H
 
 #include <linux/compiler.h>
+#include <linux/rbtree.h>
+#include <pthread.h>
+#include <api/fd/array.h>
 #include "event.h"
+#include <stdio.h>
 
 struct machine;
 union perf_event;
+struct perf_env;
 struct perf_sample;
-struct perf_tool;
 struct record_opts;
+struct evlist;
+struct target;
+
+struct bpf_prog_info_node {
+       struct bpf_prog_info_linear     *info_linear;
+       struct rb_node                  rb_node;
+};
+
+struct btf_node {
+       struct rb_node  rb_node;
+       u32             id;
+       u32             data_size;
+       char            data[];
+};
 
 #ifdef HAVE_LIBBPF_SUPPORT
 int machine__process_bpf_event(struct machine *machine, union perf_event *event,
                               struct perf_sample *sample);
 
-int perf_event__synthesize_bpf_events(struct perf_tool *tool,
+int perf_event__synthesize_bpf_events(struct perf_session *session,
                                      perf_event__handler_t process,
                                      struct machine *machine,
                                      struct record_opts *opts);
+int bpf_event__add_sb_event(struct perf_evlist **evlist,
+                           struct perf_env *env);
+void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+                                   struct perf_env *env,
+                                   FILE *fp);
 #else
 static inline int machine__process_bpf_event(struct machine *machine __maybe_unused,
                                             union perf_event *event __maybe_unused,
@@ -27,12 +50,25 @@ static inline int machine__process_bpf_event(struct machine *machine __maybe_unu
        return 0;
 }
 
-static inline int perf_event__synthesize_bpf_events(struct perf_tool *tool __maybe_unused,
+static inline int perf_event__synthesize_bpf_events(struct perf_session *session __maybe_unused,
                                                    perf_event__handler_t process __maybe_unused,
                                                    struct machine *machine __maybe_unused,
                                                    struct record_opts *opts __maybe_unused)
 {
        return 0;
 }
+
+static inline int bpf_event__add_sb_event(struct perf_evlist **evlist __maybe_unused,
+                                         struct perf_env *env __maybe_unused)
+{
+       return 0;
+}
+
+static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
+                                                 struct perf_env *env __maybe_unused,
+                                                 FILE *fp __maybe_unused)
+{
+}
 #endif // HAVE_LIBBPF_SUPPORT
 #endif
index bff0d17920ed7dd6a386f31b6377e2cfb6353165..0c5517a8d0b772bc25acc1e9309fb4e00502965a 100644 (file)
@@ -185,6 +185,7 @@ char *build_id_cache__linkname(const char *sbuild_id, char *bf, size_t size)
        return bf;
 }
 
+/* The caller is responsible for freeing the returned buffer. */
 char *build_id_cache__origname(const char *sbuild_id)
 {
        char *linkname;
index ca0fff6272be4c1dac0dde2a1b1580195b19486b..06f48312c5ed05b76c11fae05c7a3c5f7851cc18 100644 (file)
@@ -7,7 +7,6 @@
 #include "asm/bug.h"
 #include "debug.h"
 #include <unistd.h>
-#include <asm/unistd.h>
 #include <sys/syscall.h>
 
 static unsigned long flag = PERF_FLAG_FD_CLOEXEC;
index fa092511c52b6af5921e7f8f2e3cf576d3bd01c4..7e3c1b60120c259a7b01f32cf1e109e4af873b89 100644 (file)
@@ -633,11 +633,10 @@ static int collect_config(const char *var, const char *value,
        }
 
        ret = set_value(item, value);
-       return ret;
 
 out_free:
        free(key);
-       return -1;
+       return ret;
 }
 
 int perf_config_set__collect(struct perf_config_set *set, const char *file_name,
index ba4c623cd8de0d00f89ba452efe5b91569c9c794..39fe21e1cf930108adeec730cabc77ca76bc7f78 100644 (file)
@@ -387,6 +387,7 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
                break;
        case OCSD_INSTR_ISB:
        case OCSD_INSTR_DSB_DMB:
+       case OCSD_INSTR_WFI_WFE:
        case OCSD_INSTR_OTHER:
        default:
                packet->last_instr_taken_branch = false;
index 110804936fc3f27fa0891f40cc97d4b3483d55a3..de488b43f440ff03517cfe841835479f72815271 100644 (file)
@@ -422,11 +422,9 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm)
        if (!etmq->packet)
                goto out_free;
 
-       if (etm->synth_opts.last_branch || etm->sample_branches) {
-               etmq->prev_packet = zalloc(szp);
-               if (!etmq->prev_packet)
-                       goto out_free;
-       }
+       etmq->prev_packet = zalloc(szp);
+       if (!etmq->prev_packet)
+               goto out_free;
 
        if (etm->synth_opts.last_branch) {
                size_t sz = sizeof(struct branch_stack);
@@ -981,7 +979,6 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
         * PREV_PACKET is a branch.
         */
        if (etm->synth_opts.last_branch &&
-           etmq->prev_packet &&
            etmq->prev_packet->sample_type == CS_ETM_RANGE &&
            etmq->prev_packet->last_instr_taken_branch)
                cs_etm__update_last_branch_rb(etmq);
@@ -1014,7 +1011,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
                etmq->period_instructions = instrs_over;
        }
 
-       if (etm->sample_branches && etmq->prev_packet) {
+       if (etm->sample_branches) {
                bool generate_sample = false;
 
                /* Generate sample for tracing on packet */
@@ -1071,9 +1068,6 @@ static int cs_etm__flush(struct cs_etm_queue *etmq)
        struct cs_etm_auxtrace *etm = etmq->etm;
        struct cs_etm_packet *tmp;
 
-       if (!etmq->prev_packet)
-               return 0;
-
        /* Handle start tracing packet */
        if (etmq->prev_packet->sample_type == CS_ETM_EMPTY)
                goto swap_packet;
index 26af43ad9ddd331bd74b62a09d2833821bfc3996..e0311c9750ad5c9128c2cf6f4ce54d73c70a333f 100644 (file)
@@ -310,7 +310,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
        if (flags & TEP_FIELD_IS_DYNAMIC) {
                unsigned long long tmp_val;
 
-               tmp_val = tep_read_number(fmtf->event->pevent,
+               tmp_val = tep_read_number(fmtf->event->tep,
                                          data + offset, len);
                offset = tmp_val;
                len = offset >> 16;
@@ -354,7 +354,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
                        unsigned long long value_int;
 
                        value_int = tep_read_number(
-                                       fmtf->event->pevent,
+                                       fmtf->event->tep,
                                        data + offset + i * len, len);
 
                        if (!(flags & TEP_FIELD_IS_SIGNED))
index e098e189f93e45e0217bb07758fab03f543a45c0..6a64f713710ddab8fff024831ec766dbca04dd81 100644 (file)
@@ -14,6 +14,7 @@
 #include "data.h"
 #include "util.h"
 #include "debug.h"
+#include "header.h"
 
 static void close_dir(struct perf_data_file *files, int nr)
 {
@@ -34,12 +35,16 @@ int perf_data__create_dir(struct perf_data *data, int nr)
        struct perf_data_file *files = NULL;
        int i, ret = -1;
 
+       if (WARN_ON(!data->is_dir))
+               return -EINVAL;
+
        files = zalloc(nr * sizeof(*files));
        if (!files)
                return -ENOMEM;
 
-       data->dir.files = files;
-       data->dir.nr    = nr;
+       data->dir.version = PERF_DIR_VERSION;
+       data->dir.files   = files;
+       data->dir.nr      = nr;
 
        for (i = 0; i < nr; i++) {
                struct perf_data_file *file = &files[i];
@@ -69,6 +74,13 @@ int perf_data__open_dir(struct perf_data *data)
        DIR *dir;
        int nr = 0;
 
+       if (WARN_ON(!data->is_dir))
+               return -EINVAL;
+
+       /* The version is provided by the DIR_FORMAT feature. */
+       if (WARN_ON(data->dir.version != PERF_DIR_VERSION))
+               return -1;
+
        dir = opendir(data->path);
        if (!dir)
                return -EINVAL;
@@ -118,6 +130,26 @@ out_err:
        return ret;
 }
 
+int perf_data__update_dir(struct perf_data *data)
+{
+       int i;
+
+       if (WARN_ON(!data->is_dir))
+               return -EINVAL;
+
+       for (i = 0; i < data->dir.nr; i++) {
+               struct perf_data_file *file = &data->dir.files[i];
+               struct stat st;
+
+               if (fstat(file->fd, &st))
+                       return -1;
+
+               file->size = st.st_size;
+       }
+
+       return 0;
+}
+
 static bool check_pipe(struct perf_data *data)
 {
        struct stat st;
@@ -173,6 +205,16 @@ static int check_backup(struct perf_data *data)
        return 0;
 }
 
+static bool is_dir(struct perf_data *data)
+{
+       struct stat st;
+
+       if (stat(data->path, &st))
+               return false;
+
+       return (st.st_mode & S_IFMT) == S_IFDIR;
+}
+
 static int open_file_read(struct perf_data *data)
 {
        struct stat st;
@@ -254,6 +296,30 @@ static int open_file_dup(struct perf_data *data)
        return open_file(data);
 }
 
+static int open_dir(struct perf_data *data)
+{
+       int ret;
+
+       /*
+        * For now, open only the header so we can read the data version
+        * and layout.
+        */
+       if (asprintf(&data->file.path, "%s/header", data->path) < 0)
+               return -1;
+
+       if (perf_data__is_write(data) &&
+           mkdir(data->path, S_IRWXU) < 0)
+               return -1;
+
+       ret = open_file(data);
+
+       /* Clean up whatever we managed to create so far. */
+       if (ret && perf_data__is_write(data))
+               rm_rf_perf_data(data->path);
+
+       return ret;
+}
+
 int perf_data__open(struct perf_data *data)
 {
        if (check_pipe(data))
@@ -265,11 +331,18 @@ int perf_data__open(struct perf_data *data)
        if (check_backup(data))
                return -1;
 
-       return open_file_dup(data);
+       if (perf_data__is_read(data))
+               data->is_dir = is_dir(data);
+
+       return perf_data__is_dir(data) ?
+              open_dir(data) : open_file_dup(data);
 }
 
 void perf_data__close(struct perf_data *data)
 {
+       if (perf_data__is_dir(data))
+               perf_data__close_dir(data);
+
        zfree(&data->file.path);
        close(data->file.fd);
 }
@@ -288,9 +361,9 @@ ssize_t perf_data__write(struct perf_data *data,
 
 int perf_data__switch(struct perf_data *data,
                           const char *postfix,
-                          size_t pos, bool at_exit)
+                          size_t pos, bool at_exit,
+                          char **new_filepath)
 {
-       char *new_filepath;
        int ret;
 
        if (check_pipe(data))
@@ -298,15 +371,15 @@ int perf_data__switch(struct perf_data *data,
        if (perf_data__is_read(data))
                return -EINVAL;
 
-       if (asprintf(&new_filepath, "%s.%s", data->path, postfix) < 0)
+       if (asprintf(new_filepath, "%s.%s", data->path, postfix) < 0)
                return -ENOMEM;
 
        /*
          * Only fire a warning; don't return an error. Continue filling
          * the original file.
         */
-       if (rename(data->path, new_filepath))
-               pr_warning("Failed to rename %s to %s\n", data->path, new_filepath);
+       if (rename(data->path, *new_filepath))
+               pr_warning("Failed to rename %s to %s\n", data->path, *new_filepath);
 
        if (!at_exit) {
                close(data->file.fd);
@@ -323,6 +396,22 @@ int perf_data__switch(struct perf_data *data,
        }
        ret = data->file.fd;
 out:
-       free(new_filepath);
        return ret;
 }
+
+unsigned long perf_data__size(struct perf_data *data)
+{
+       u64 size = data->file.size;
+       int i;
+
+       if (!data->is_dir)
+               return size;
+
+       for (i = 0; i < data->dir.nr; i++) {
+               struct perf_data_file *file = &data->dir.files[i];
+
+               size += file->size;
+       }
+
+       return size;
+}
index 14b47be2bd69b6d5578e74334fb5577594469efd..259868a39019890ce6c91de895fc7cf2048c7e6e 100644 (file)
@@ -19,10 +19,12 @@ struct perf_data {
        const char              *path;
        struct perf_data_file    file;
        bool                     is_pipe;
+       bool                     is_dir;
        bool                     force;
        enum perf_data_mode      mode;
 
        struct {
+               u64                      version;
                struct perf_data_file   *files;
                int                      nr;
        } dir;
@@ -43,14 +45,14 @@ static inline int perf_data__is_pipe(struct perf_data *data)
        return data->is_pipe;
 }
 
-static inline int perf_data__fd(struct perf_data *data)
+static inline bool perf_data__is_dir(struct perf_data *data)
 {
-       return data->file.fd;
+       return data->is_dir;
 }
 
-static inline unsigned long perf_data__size(struct perf_data *data)
+static inline int perf_data__fd(struct perf_data *data)
 {
-       return data->file.size;
+       return data->file.fd;
 }
 
 int perf_data__open(struct perf_data *data);
@@ -68,9 +70,11 @@ ssize_t perf_data_file__write(struct perf_data_file *file,
  */
 int perf_data__switch(struct perf_data *data,
                           const char *postfix,
-                          size_t pos, bool at_exit);
+                          size_t pos, bool at_exit, char **new_filepath);
 
 int perf_data__create_dir(struct perf_data *data, int nr);
 int perf_data__open_dir(struct perf_data *data);
 void perf_data__close_dir(struct perf_data *data);
+int perf_data__update_dir(struct perf_data *data);
+unsigned long perf_data__size(struct perf_data *data);
 #endif /* __PERF_DATA_H */
index ba58ba603b69c800454688d21bbb5d7134d5abac..e059976d9d9365abeb943647b456fbfa560e7775 100644 (file)
@@ -184,6 +184,7 @@ int dso__read_binary_type_filename(const struct dso *dso,
        case DSO_BINARY_TYPE__KALLSYMS:
        case DSO_BINARY_TYPE__GUEST_KALLSYMS:
        case DSO_BINARY_TYPE__JAVA_JIT:
+       case DSO_BINARY_TYPE__BPF_PROG_INFO:
        case DSO_BINARY_TYPE__NOT_FOUND:
                ret = -1;
                break;
@@ -1141,28 +1142,34 @@ void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
 
 static void dso__set_basename(struct dso *dso)
 {
-       /*
-        * basename() may modify path buffer, so we must pass
-        * a copy.
-        */
-       char *base, *lname = strdup(dso->long_name);
+       char *base, *lname;
+       int tid;
 
-       if (!lname)
-               return;
-
-       /*
-        * basename() may return a pointer to internal
-        * storage which is reused in subsequent calls
-        * so copy the result.
-        */
-       base = strdup(basename(lname));
+       if (sscanf(dso->long_name, "/tmp/perf-%d.map", &tid) == 1) {
+               if (asprintf(&base, "[JIT] tid %d", tid) < 0)
+                       return;
+       } else {
+               /*
+                * basename() may modify path buffer, so we must pass
+                * a copy.
+                */
+               lname = strdup(dso->long_name);
+               if (!lname)
+                       return;
 
-       free(lname);
+               /*
+                * basename() may return a pointer to internal
+                * storage which is reused in subsequent calls
+                * so copy the result.
+                */
+               base = strdup(basename(lname));
 
-       if (!base)
-               return;
+               free(lname);
 
-       dso__set_short_name(dso, base, true);
+               if (!base)
+                       return;
+       }
+       dso__set_short_name(dso, base, true);
 }
 
 int dso__name_len(const struct dso *dso)
index bb417c54c25a3abfe26ac2f35fddd70405ed907d..6e3f63781e51ad2f8294dbbe8645c9ddf39a62cd 100644 (file)
@@ -14,6 +14,7 @@
 
 struct machine;
 struct map;
+struct perf_env;
 
 enum dso_binary_type {
        DSO_BINARY_TYPE__KALLSYMS = 0,
@@ -35,6 +36,7 @@ enum dso_binary_type {
        DSO_BINARY_TYPE__KCORE,
        DSO_BINARY_TYPE__GUEST_KCORE,
        DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
+       DSO_BINARY_TYPE__BPF_PROG_INFO,
        DSO_BINARY_TYPE__NOT_FOUND,
 };
 
@@ -189,6 +191,12 @@ struct dso {
                u64              debug_frame_offset;
                u64              eh_frame_hdr_offset;
        } data;
+       /* bpf prog information */
+       struct {
+               u32             id;
+               u32             sub_id;
+               struct perf_env *env;
+       } bpf_prog;
 
        union { /* Tool specific area */
                void     *priv;
index 4c23779e271a31ce66d0deeb527a658e0ae90ae7..6a3eaf7d9353c5251f8c0d2a7f7ab6b6696ace0d 100644 (file)
 #include "env.h"
 #include "sane_ctype.h"
 #include "util.h"
+#include "bpf-event.h"
 #include <errno.h>
 #include <sys/utsname.h>
+#include <bpf/libbpf.h>
 
 struct perf_env perf_env;
 
+void perf_env__insert_bpf_prog_info(struct perf_env *env,
+                                   struct bpf_prog_info_node *info_node)
+{
+       __u32 prog_id = info_node->info_linear->info.id;
+       struct bpf_prog_info_node *node;
+       struct rb_node *parent = NULL;
+       struct rb_node **p;
+
+       down_write(&env->bpf_progs.lock);
+       p = &env->bpf_progs.infos.rb_node;
+
+       while (*p != NULL) {
+               parent = *p;
+               node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
+               if (prog_id < node->info_linear->info.id) {
+                       p = &(*p)->rb_left;
+               } else if (prog_id > node->info_linear->info.id) {
+                       p = &(*p)->rb_right;
+               } else {
+                       pr_debug("duplicated bpf prog info %u\n", prog_id);
+                       goto out;
+               }
+       }
+
+       rb_link_node(&info_node->rb_node, parent, p);
+       rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
+       env->bpf_progs.infos_cnt++;
+out:
+       up_write(&env->bpf_progs.lock);
+}
+
+struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+                                                       __u32 prog_id)
+{
+       struct bpf_prog_info_node *node = NULL;
+       struct rb_node *n;
+
+       down_read(&env->bpf_progs.lock);
+       n = env->bpf_progs.infos.rb_node;
+
+       while (n) {
+               node = rb_entry(n, struct bpf_prog_info_node, rb_node);
+               if (prog_id < node->info_linear->info.id)
+                       n = n->rb_left;
+               else if (prog_id > node->info_linear->info.id)
+                       n = n->rb_right;
+               else
+                       goto out;
+       }
+       node = NULL;
+
+out:
+       up_read(&env->bpf_progs.lock);
+       return node;
+}
+
+void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+{
+       struct rb_node *parent = NULL;
+       __u32 btf_id = btf_node->id;
+       struct btf_node *node;
+       struct rb_node **p;
+
+       down_write(&env->bpf_progs.lock);
+       p = &env->bpf_progs.btfs.rb_node;
+
+       while (*p != NULL) {
+               parent = *p;
+               node = rb_entry(parent, struct btf_node, rb_node);
+               if (btf_id < node->id) {
+                       p = &(*p)->rb_left;
+               } else if (btf_id > node->id) {
+                       p = &(*p)->rb_right;
+               } else {
+                       pr_debug("duplicated btf %u\n", btf_id);
+                       goto out;
+               }
+       }
+
+       rb_link_node(&btf_node->rb_node, parent, p);
+       rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
+       env->bpf_progs.btfs_cnt++;
+out:
+       up_write(&env->bpf_progs.lock);
+}
+
+struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+{
+       struct btf_node *node = NULL;
+       struct rb_node *n;
+
+       down_read(&env->bpf_progs.lock);
+       n = env->bpf_progs.btfs.rb_node;
+
+       while (n) {
+               node = rb_entry(n, struct btf_node, rb_node);
+               if (btf_id < node->id)
+                       n = n->rb_left;
+               else if (btf_id > node->id)
+                       n = n->rb_right;
+               else
+                       goto out;
+       }
+       node = NULL;
+
+out:
+       up_read(&env->bpf_progs.lock);
+       return node;
+}
+
+/* purge data in bpf_progs.infos tree */
+static void perf_env__purge_bpf(struct perf_env *env)
+{
+       struct rb_root *root;
+       struct rb_node *next;
+
+       down_write(&env->bpf_progs.lock);
+
+       root = &env->bpf_progs.infos;
+       next = rb_first(root);
+
+       while (next) {
+               struct bpf_prog_info_node *node;
+
+               node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+               next = rb_next(&node->rb_node);
+               rb_erase(&node->rb_node, root);
+               free(node);
+       }
+
+       env->bpf_progs.infos_cnt = 0;
+
+       root = &env->bpf_progs.btfs;
+       next = rb_first(root);
+
+       while (next) {
+               struct btf_node *node;
+
+               node = rb_entry(next, struct btf_node, rb_node);
+               next = rb_next(&node->rb_node);
+               rb_erase(&node->rb_node, root);
+               free(node);
+       }
+
+       env->bpf_progs.btfs_cnt = 0;
+
+       up_write(&env->bpf_progs.lock);
+}
+
 void perf_env__exit(struct perf_env *env)
 {
        int i;
 
+       perf_env__purge_bpf(env);
        zfree(&env->hostname);
        zfree(&env->os_release);
        zfree(&env->version);
@@ -38,6 +190,13 @@ void perf_env__exit(struct perf_env *env)
        zfree(&env->memory_nodes);
 }
 
+void perf_env__init(struct perf_env *env)
+{
+       env->bpf_progs.infos = RB_ROOT;
+       env->bpf_progs.btfs = RB_ROOT;
+       init_rwsem(&env->bpf_progs.lock);
+}
+
 int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
 {
        int i;
index d01b8355f4caba9440d0e0d4db4c6f241e801dc7..4f8e2b485c01cb9dab58f12942734d076d35208c 100644 (file)
@@ -3,7 +3,9 @@
 #define __PERF_ENV_H
 
 #include <linux/types.h>
+#include <linux/rbtree.h>
 #include "cpumap.h"
+#include "rwsem.h"
 
 struct cpu_topology_map {
        int     socket_id;
@@ -64,8 +66,23 @@ struct perf_env {
        struct memory_node      *memory_nodes;
        unsigned long long       memory_bsize;
        u64                     clockid_res_ns;
+
+       /*
+        * bpf_progs.lock protects the bpf rbtrees below. This is needed
+        * because the trees are accessed by multiple threads, e.g. in
+        * perf-top.
+        */
+       struct {
+               struct rw_semaphore     lock;
+               struct rb_root          infos;
+               u32                     infos_cnt;
+               struct rb_root          btfs;
+               u32                     btfs_cnt;
+       } bpf_progs;
 };
 
+struct bpf_prog_info_node;
+struct btf_node;
+
 extern struct perf_env perf_env;
 
 void perf_env__exit(struct perf_env *env);
@@ -80,4 +97,11 @@ const char *perf_env__arch(struct perf_env *env);
 const char *perf_env__raw_arch(struct perf_env *env);
 int perf_env__nr_cpus_avail(struct perf_env *env);
 
+void perf_env__init(struct perf_env *env);
+void perf_env__insert_bpf_prog_info(struct perf_env *env,
+                                   struct bpf_prog_info_node *info_node);
+struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+                                                       __u32 prog_id);
+void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
+struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
 #endif /* __PERF_ENV_H */
index 36ae7e92dab1d755ab1fa5b4db6836c8e4f7010c..4e908ec1ef64986ea3649d9f24492ba1f7e85d2a 100644 (file)
@@ -6,6 +6,7 @@
 #include <stdio.h>
 #include <linux/kernel.h>
 #include <linux/bpf.h>
+#include <linux/perf_event.h>
 
 #include "../perf.h"
 #include "build-id.h"
index ed20f4379956594a908299f08c9d2ef4ac140dc2..4b6783ff58131280d87fe2d9809baa566d9467ef 100644 (file)
@@ -19,6 +19,7 @@
 #include "debug.h"
 #include "units.h"
 #include "asm/bug.h"
+#include "bpf-event.h"
 #include <signal.h>
 #include <unistd.h>
 
@@ -230,35 +231,6 @@ void perf_evlist__set_leader(struct perf_evlist *evlist)
        }
 }
 
-void perf_event_attr__set_max_precise_ip(struct perf_event_attr *pattr)
-{
-       struct perf_event_attr attr = {
-               .type           = PERF_TYPE_HARDWARE,
-               .config         = PERF_COUNT_HW_CPU_CYCLES,
-               .exclude_kernel = 1,
-               .precise_ip     = 3,
-       };
-
-       event_attr_init(&attr);
-
-       /*
-        * Unnamed union member, not supported as struct member named
-        * initializer in older compilers such as gcc 4.4.7
-        */
-       attr.sample_period = 1;
-
-       while (attr.precise_ip != 0) {
-               int fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
-               if (fd != -1) {
-                       close(fd);
-                       break;
-               }
-               --attr.precise_ip;
-       }
-
-       pattr->precise_ip = attr.precise_ip;
-}
-
 int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise)
 {
        struct perf_evsel *evsel = perf_evsel__new_cycles(precise);
@@ -1037,7 +1009,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
  */
 int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
                         unsigned int auxtrace_pages,
-                        bool auxtrace_overwrite, int nr_cblocks, int affinity)
+                        bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush)
 {
        struct perf_evsel *evsel;
        const struct cpu_map *cpus = evlist->cpus;
@@ -1047,7 +1019,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
         * Its value is decided by evsel's write_backward.
         * So &mp should not be passed through const pointer.
         */
-       struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity };
+       struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity, .flush = flush };
 
        if (!evlist->mmap)
                evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
@@ -1079,7 +1051,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
 
 int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
 {
-       return perf_evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS);
+       return perf_evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1);
 }
 
 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
@@ -1856,3 +1828,125 @@ struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evsel_list,
        }
        return leader;
 }
+
+int perf_evlist__add_sb_event(struct perf_evlist **evlist,
+                             struct perf_event_attr *attr,
+                             perf_evsel__sb_cb_t cb,
+                             void *data)
+{
+       struct perf_evsel *evsel;
+       bool new_evlist = (*evlist) == NULL;
+
+       if (*evlist == NULL)
+               *evlist = perf_evlist__new();
+       if (*evlist == NULL)
+               return -1;
+
+       if (!attr->sample_id_all) {
+               pr_warning("enabling sample_id_all for all side band events\n");
+               attr->sample_id_all = 1;
+       }
+
+       evsel = perf_evsel__new_idx(attr, (*evlist)->nr_entries);
+       if (!evsel)
+               goto out_err;
+
+       evsel->side_band.cb = cb;
+       evsel->side_band.data = data;
+       perf_evlist__add(*evlist, evsel);
+       return 0;
+
+out_err:
+       if (new_evlist) {
+               perf_evlist__delete(*evlist);
+               *evlist = NULL;
+       }
+       return -1;
+}
+
+static void *perf_evlist__poll_thread(void *arg)
+{
+       struct perf_evlist *evlist = arg;
+       bool draining = false;
+       int i, done = 0;
+
+       while (!done) {
+               bool got_data = false;
+
+               if (evlist->thread.done)
+                       draining = true;
+
+               if (!draining)
+                       perf_evlist__poll(evlist, 1000);
+
+               for (i = 0; i < evlist->nr_mmaps; i++) {
+                       struct perf_mmap *map = &evlist->mmap[i];
+                       union perf_event *event;
+
+                       if (perf_mmap__read_init(map))
+                               continue;
+                       while ((event = perf_mmap__read_event(map)) != NULL) {
+                               struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
+
+                               if (evsel && evsel->side_band.cb)
+                                       evsel->side_band.cb(event, evsel->side_band.data);
+                               else
+                                       pr_warning("cannot locate proper evsel for the side band event\n");
+
+                               perf_mmap__consume(map);
+                               got_data = true;
+                       }
+                       perf_mmap__read_done(map);
+               }
+
+               if (draining && !got_data)
+                       break;
+       }
+       return NULL;
+}
+
+int perf_evlist__start_sb_thread(struct perf_evlist *evlist,
+                                struct target *target)
+{
+       struct perf_evsel *counter;
+
+       if (!evlist)
+               return 0;
+
+       if (perf_evlist__create_maps(evlist, target))
+               goto out_delete_evlist;
+
+       evlist__for_each_entry(evlist, counter) {
+               if (perf_evsel__open(counter, evlist->cpus,
+                                    evlist->threads) < 0)
+                       goto out_delete_evlist;
+       }
+
+       if (perf_evlist__mmap(evlist, UINT_MAX))
+               goto out_delete_evlist;
+
+       evlist__for_each_entry(evlist, counter) {
+               if (perf_evsel__enable(counter))
+                       goto out_delete_evlist;
+       }
+
+       evlist->thread.done = 0;
+       if (pthread_create(&evlist->thread.th, NULL, perf_evlist__poll_thread, evlist))
+               goto out_delete_evlist;
+
+       return 0;
+
+out_delete_evlist:
+       perf_evlist__delete(evlist);
+       evlist = NULL;
+       return -1;
+}
+
+void perf_evlist__stop_sb_thread(struct perf_evlist *evlist)
+{
+       if (!evlist)
+               return;
+       evlist->thread.done = 1;
+       pthread_join(evlist->thread.th, NULL);
+       perf_evlist__delete(evlist);
+}
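
A hedged usage sketch of the side-band thread lifecycle, wiring in the BPF callback added in bpf-event.c (the env and target variables are assumptions, error handling elided):

    struct perf_evlist *sb_evlist = NULL;

    /* registers a dummy event whose callback handles PERF_RECORD_BPF_EVENT */
    bpf_event__add_sb_event(&sb_evlist, env);

    /* creates maps, opens/mmaps/enables the events, spawns the poller */
    if (perf_evlist__start_sb_thread(sb_evlist, target))
            pr_debug("Couldn't start the BPF side band thread\n");

    /* ... the main record/report work runs concurrently ... */

    /* sets thread.done, joins the poller and deletes the evlist */
    perf_evlist__stop_sb_thread(sb_evlist);
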
index 744906dd488732a46801fcc50588a32c190b7a45..c9a0f72677fd4fba1c1947fa87c17545ec85f536 100644 (file)
@@ -54,6 +54,10 @@ struct perf_evlist {
                                       struct perf_sample *sample);
        u64             first_sample_time;
        u64             last_sample_time;
+       struct {
+               pthread_t               th;
+               volatile int            done;
+       } thread;
 };
 
 struct perf_evsel_str_handler {
@@ -87,6 +91,14 @@ int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
 
 int perf_evlist__add_dummy(struct perf_evlist *evlist);
 
+int perf_evlist__add_sb_event(struct perf_evlist **evlist,
+                             struct perf_event_attr *attr,
+                             perf_evsel__sb_cb_t cb,
+                             void *data);
+int perf_evlist__start_sb_thread(struct perf_evlist *evlist,
+                                struct target *target);
+void perf_evlist__stop_sb_thread(struct perf_evlist *evlist);
+
 int perf_evlist__add_newtp(struct perf_evlist *evlist,
                           const char *sys, const char *name, void *handler);
 
@@ -165,7 +177,8 @@ unsigned long perf_event_mlock_kb_in_pages(void);
 
 int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
                         unsigned int auxtrace_pages,
-                        bool auxtrace_overwrite, int nr_cblocks, int affinity);
+                        bool auxtrace_overwrite, int nr_cblocks,
+                        int affinity, int flush);
 int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages);
 void perf_evlist__munmap(struct perf_evlist *evlist);
 
@@ -303,8 +316,6 @@ void perf_evlist__to_front(struct perf_evlist *evlist,
 void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
                                     struct perf_evsel *tracking_evsel);
 
-void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr);
-
 struct perf_evsel *
 perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);
 
index 3bbf73e979c00aa4f9c363e9d224440190425457..a10cf4cde92059b3b667216bc625a5b472f7237a 100644 (file)
@@ -295,7 +295,6 @@ struct perf_evsel *perf_evsel__new_cycles(bool precise)
        if (!precise)
                goto new_event;
 
-       perf_event_attr__set_max_precise_ip(&attr);
        /*
         * Now let the usual logic to set up the perf_event_attr defaults
         * to kick in when we return and before perf_evsel__open() is called.
@@ -305,6 +304,8 @@ new_event:
        if (evsel == NULL)
                goto out;
 
+       evsel->precise_max = true;
+
        /* use asprintf() because free(evsel) assumes name is allocated */
        if (asprintf(&evsel->name, "cycles%s%s%.*s",
                     (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
@@ -579,6 +580,12 @@ static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
        return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
 }
 
+static int perf_evsel__tool_name(char *bf, size_t size)
+{
+       return scnprintf(bf, size, "duration_time");
+}
+
 const char *perf_evsel__name(struct perf_evsel *evsel)
 {
        char bf[128];
@@ -600,7 +607,10 @@ const char *perf_evsel__name(struct perf_evsel *evsel)
                break;
 
        case PERF_TYPE_SOFTWARE:
-               perf_evsel__sw_name(evsel, bf, sizeof(bf));
+               if (evsel->tool_event)
+                       perf_evsel__tool_name(bf, sizeof(bf));
+               else
+                       perf_evsel__sw_name(evsel, bf, sizeof(bf));
                break;
 
        case PERF_TYPE_TRACEPOINT:
@@ -1036,7 +1046,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
        attr->mmap2 = track && !perf_missing_features.mmap2;
        attr->comm  = track;
        attr->ksymbol = track && !perf_missing_features.ksymbol;
-       attr->bpf_event = track && opts->bpf_event &&
+       attr->bpf_event = track && !opts->no_bpf_event &&
                !perf_missing_features.bpf_event;
 
        if (opts->record_namespaces)
@@ -1083,7 +1093,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
        }
 
        if (evsel->precise_max)
-               perf_event_attr__set_max_precise_ip(attr);
+               attr->precise_ip = 3;
 
        if (opts->all_user) {
                attr->exclude_kernel = 1;
@@ -1292,6 +1302,7 @@ void perf_evsel__exit(struct perf_evsel *evsel)
 {
        assert(list_empty(&evsel->node));
        assert(evsel->evlist == NULL);
+       perf_evsel__free_counts(evsel);
        perf_evsel__free_fd(evsel);
        perf_evsel__free_id(evsel);
        perf_evsel__free_config_terms(evsel);
@@ -1342,10 +1353,9 @@ void perf_counts_values__scale(struct perf_counts_values *count,
                        count->val = 0;
                } else if (count->run < count->ena) {
                        scaled = 1;
-                       count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
+                       count->val = (u64)((double) count->val * count->ena / count->run);
                }
-       } else
-               count->ena = count->run = 0;
+       }
 
        if (pscaled)
                *pscaled = scaled;
@@ -1749,6 +1759,59 @@ static bool ignore_missing_thread(struct perf_evsel *evsel,
        return true;
 }
 
+static void display_attr(struct perf_event_attr *attr)
+{
+       if (verbose >= 2) {
+               fprintf(stderr, "%.60s\n", graph_dotted_line);
+               fprintf(stderr, "perf_event_attr:\n");
+               perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL);
+               fprintf(stderr, "%.60s\n", graph_dotted_line);
+       }
+}
+
+static int perf_event_open(struct perf_evsel *evsel,
+                          pid_t pid, int cpu, int group_fd,
+                          unsigned long flags)
+{
+       int precise_ip = evsel->attr.precise_ip;
+       int fd;
+
+       while (1) {
+               pr_debug2("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx",
+                         pid, cpu, group_fd, flags);
+
+               fd = sys_perf_event_open(&evsel->attr, pid, cpu, group_fd, flags);
+               if (fd >= 0)
+                       break;
+
+               /*
+                * Do a quick precise_ip fallback if:
+                *  - precise_ip is set in perf_event_attr
+                *  - maximum precision was requested (precise_max)
+                *  - sys_perf_event_open failed with ENOTSUP, which
+                *    indicates an unsupported precise_ip value
+                */
+               if (!precise_ip || !evsel->precise_max || (errno != ENOTSUP))
+                       break;
+
+               /*
+                * We tried all the precise_ip values and it is still
+                * failing, so leave it to the standard fallback.
+                */
+               if (!evsel->attr.precise_ip) {
+                       evsel->attr.precise_ip = precise_ip;
+                       break;
+               }
+
+               pr_debug2("\nsys_perf_event_open failed, error %d\n", -ENOTSUP);
+               evsel->attr.precise_ip--;
+               pr_debug2("decreasing precise_ip by one (%d)\n", evsel->attr.precise_ip);
+               display_attr(&evsel->attr);
+       }
+
+       return fd;
+}
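
A standalone sketch of the degrade-and-restore loop above, useful for seeing the order of attempts; plain libc, no perf internals, and it assumes every attempt fails with ENOTSUP (in the real code a successful open breaks out early):

    #include <stdio.h>

    int main(void)
    {
            int precise_ip = 3, saved = precise_ip;

            for (;;) {
                    printf("try precise_ip=%d -> ENOTSUP\n", precise_ip);
                    if (!precise_ip) {
                            /* all values failed: restore the request
                             * and leave it to the standard fallback */
                            precise_ip = saved;
                            break;
                    }
                    precise_ip--;
            }
            printf("standard fallback sees precise_ip=%d\n", precise_ip);
            return 0;
    }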
+
 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                     struct thread_map *threads)
 {
@@ -1824,12 +1887,7 @@ retry_sample_id:
        if (perf_missing_features.sample_id_all)
                evsel->attr.sample_id_all = 0;
 
-       if (verbose >= 2) {
-               fprintf(stderr, "%.60s\n", graph_dotted_line);
-               fprintf(stderr, "perf_event_attr:\n");
-               perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
-               fprintf(stderr, "%.60s\n", graph_dotted_line);
-       }
+       display_attr(&evsel->attr);
 
        for (cpu = 0; cpu < cpus->nr; cpu++) {
 
@@ -1841,13 +1899,10 @@ retry_sample_id:
 
                        group_fd = get_group_fd(evsel, cpu, thread);
 retry_open:
-                       pr_debug2("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx",
-                                 pid, cpus->map[cpu], group_fd, flags);
-
                        test_attr__ready();
 
-                       fd = sys_perf_event_open(&evsel->attr, pid, cpus->map[cpu],
-                                                group_fd, flags);
+                       fd = perf_event_open(evsel, pid, cpus->map[cpu],
+                                            group_fd, flags);
 
                        FD(evsel, cpu, thread) = fd;
 
@@ -2322,7 +2377,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
                if (data->user_regs.abi) {
                        u64 mask = evsel->attr.sample_regs_user;
 
-                       sz = hweight_long(mask) * sizeof(u64);
+                       sz = hweight64(mask) * sizeof(u64);
                        OVERFLOW_CHECK(array, sz, max_size);
                        data->user_regs.mask = mask;
                        data->user_regs.regs = (u64 *)array;
@@ -2378,7 +2433,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
                if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
                        u64 mask = evsel->attr.sample_regs_intr;
 
-                       sz = hweight_long(mask) * sizeof(u64);
+                       sz = hweight64(mask) * sizeof(u64);
                        OVERFLOW_CHECK(array, sz, max_size);
                        data->intr_regs.mask = mask;
                        data->intr_regs.regs = (u64 *)array;
@@ -2506,7 +2561,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
        if (type & PERF_SAMPLE_REGS_USER) {
                if (sample->user_regs.abi) {
                        result += sizeof(u64);
-                       sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
+                       sz = hweight64(sample->user_regs.mask) * sizeof(u64);
                        result += sz;
                } else {
                        result += sizeof(u64);
@@ -2534,7 +2589,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
        if (type & PERF_SAMPLE_REGS_INTR) {
                if (sample->intr_regs.abi) {
                        result += sizeof(u64);
-                       sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
+                       sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
                        result += sz;
                } else {
                        result += sizeof(u64);
@@ -2664,7 +2719,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
        if (type & PERF_SAMPLE_REGS_USER) {
                if (sample->user_regs.abi) {
                        *array++ = sample->user_regs.abi;
-                       sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
+                       sz = hweight64(sample->user_regs.mask) * sizeof(u64);
                        memcpy(array, sample->user_regs.regs, sz);
                        array = (void *)array + sz;
                } else {
@@ -2700,7 +2755,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
        if (type & PERF_SAMPLE_REGS_INTR) {
                if (sample->intr_regs.abi) {
                        *array++ = sample->intr_regs.abi;
-                       sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
+                       sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
                        memcpy(array, sample->intr_regs.regs, sz);
                        array = (void *)array + sz;
                } else {
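
The hweight_long() -> hweight64() conversions in this file matter on 32-bit builds, where long is 32 bits and the upper half of a 64-bit sample_regs mask would be silently dropped, corrupting the computed payload size. A small demo, using GCC builtins in place of the kernel helpers:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t mask = 0x0000000300000001ULL;  /* 3 registers selected */

            /* what hweight_long() computes when long is 32 bits wide: */
            printf("truncated: %d regs\n", __builtin_popcount((uint32_t)mask));
            /* what hweight64() computes on any build: */
            printf("hweight64: %d regs\n", __builtin_popcountll(mask));
            return 0;
    }

With the truncated count, sz would be 8 bytes instead of the correct 24.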
index cc578e02e08fb41a281a0b33d523bcf037efc5b3..6d190cbf1070218e6048cb5a3d2b9bf8e843bb5f 100644 (file)
@@ -73,6 +73,13 @@ struct perf_evsel_config_term {
 
 struct perf_stat_evsel;
 
+typedef int (perf_evsel__sb_cb_t)(union perf_event *event, void *data);
+
+enum perf_tool_event {
+       PERF_TOOL_NONE          = 0,
+       PERF_TOOL_DURATION_TIME = 1,
+};
+
 /** struct perf_evsel - event selector
  *
  * @evlist - evlist this evsel is in, if it is in one.
@@ -119,6 +126,7 @@ struct perf_evsel {
        unsigned int            sample_size;
        int                     id_pos;
        int                     is_pos;
+       enum perf_tool_event    tool_event;
        bool                    uniquified_name;
        bool                    snapshot;
        bool                    supported;
@@ -151,6 +159,10 @@ struct perf_evsel {
        bool                    collect_stat;
        bool                    weak_group;
        const char              *pmu_name;
+       struct {
+               perf_evsel__sb_cb_t     *cb;
+               void                    *data;
+       } side_band;
 };
 
 union u64_swap {
index 01b324c275b9d1ba0bd771f30b49952550038295..2d2af2ac2b1e976041b5e05eca4374c6b424b8fa 100644 (file)
@@ -18,6 +18,7 @@
 #include <sys/utsname.h>
 #include <linux/time64.h>
 #include <dirent.h>
+#include <bpf/libbpf.h>
 
 #include "evlist.h"
 #include "evsel.h"
@@ -40,6 +41,7 @@
 #include "time-utils.h"
 #include "units.h"
 #include "cputopo.h"
+#include "bpf-event.h"
 
 #include "sane_ctype.h"
 
@@ -861,6 +863,104 @@ static int write_clockid(struct feat_fd *ff,
                        sizeof(ff->ph->env.clockid_res_ns));
 }
 
+static int write_dir_format(struct feat_fd *ff,
+                           struct perf_evlist *evlist __maybe_unused)
+{
+       struct perf_session *session;
+       struct perf_data *data;
+
+       session = container_of(ff->ph, struct perf_session, header);
+       data = session->data;
+
+       if (WARN_ON(!perf_data__is_dir(data)))
+               return -1;
+
+       return do_write(ff, &data->dir.version, sizeof(data->dir.version));
+}
+
+#ifdef HAVE_LIBBPF_SUPPORT
+static int write_bpf_prog_info(struct feat_fd *ff,
+                              struct perf_evlist *evlist __maybe_unused)
+{
+       struct perf_env *env = &ff->ph->env;
+       struct rb_root *root;
+       struct rb_node *next;
+       int ret;
+
+       down_read(&env->bpf_progs.lock);
+
+       ret = do_write(ff, &env->bpf_progs.infos_cnt,
+                      sizeof(env->bpf_progs.infos_cnt));
+       if (ret < 0)
+               goto out;
+
+       root = &env->bpf_progs.infos;
+       next = rb_first(root);
+       while (next) {
+               struct bpf_prog_info_node *node;
+               size_t len;
+
+               node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+               next = rb_next(&node->rb_node);
+               len = sizeof(struct bpf_prog_info_linear) +
+                       node->info_linear->data_len;
+
+               /* before writing to file, translate address to offset */
+               bpf_program__bpil_addr_to_offs(node->info_linear);
+               ret = do_write(ff, node->info_linear, len);
+               /*
+                * translate back to address even when do_write() fails,
+                * so that this function never changes the data.
+                */
+               bpf_program__bpil_offs_to_addr(node->info_linear);
+               if (ret < 0)
+                       goto out;
+       }
+out:
+       up_read(&env->bpf_progs.lock);
+       return ret;
+}
+#else // HAVE_LIBBPF_SUPPORT
+static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
+                              struct perf_evlist *evlist __maybe_unused)
+{
+       return 0;
+}
+#endif // HAVE_LIBBPF_SUPPORT
+
+static int write_bpf_btf(struct feat_fd *ff,
+                        struct perf_evlist *evlist __maybe_unused)
+{
+       struct perf_env *env = &ff->ph->env;
+       struct rb_root *root;
+       struct rb_node *next;
+       int ret;
+
+       down_read(&env->bpf_progs.lock);
+
+       ret = do_write(ff, &env->bpf_progs.btfs_cnt,
+                      sizeof(env->bpf_progs.btfs_cnt));
+
+       if (ret < 0)
+               goto out;
+
+       root = &env->bpf_progs.btfs;
+       next = rb_first(root);
+       while (next) {
+               struct btf_node *node;
+
+               node = rb_entry(next, struct btf_node, rb_node);
+               next = rb_next(&node->rb_node);
+               ret = do_write(ff, &node->id,
+                              sizeof(u32) * 2 + node->data_size);
+               if (ret < 0)
+                       goto out;
+       }
+out:
+       up_read(&env->bpf_progs.lock);
+       return ret;
+}
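
The single do_write() of "sizeof(u32) * 2 + node->data_size" bytes starting at &node->id assumes that id, data_size and the flexible data array are laid out contiguously. A sketch of the layout this relies on (struct btf_node lives in util/env.h; the shape below is the assumption, not a quote from this patch):

    struct btf_node {
            struct rb_node  rb_node;    /* not written to the file   */
            u32             id;         /* <- do_write() starts here */
            u32             data_size;  /* followed by this ...      */
            char            data[];     /* ... and data_size bytes   */
    };

process_bpf_btf() further down reads the same triplet back: id, data_size, then data.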
+
 static int cpu_cache_level__sort(const void *a, const void *b)
 {
        struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
@@ -1341,6 +1441,63 @@ static void print_clockid(struct feat_fd *ff, FILE *fp)
                ff->ph->env.clockid_res_ns * 1000);
 }
 
+static void print_dir_format(struct feat_fd *ff, FILE *fp)
+{
+       struct perf_session *session;
+       struct perf_data *data;
+
+       session = container_of(ff->ph, struct perf_session, header);
+       data = session->data;
+
+       fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
+}
+
+static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
+{
+       struct perf_env *env = &ff->ph->env;
+       struct rb_root *root;
+       struct rb_node *next;
+
+       down_read(&env->bpf_progs.lock);
+
+       root = &env->bpf_progs.infos;
+       next = rb_first(root);
+
+       while (next) {
+               struct bpf_prog_info_node *node;
+
+               node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+               next = rb_next(&node->rb_node);
+
+               bpf_event__print_bpf_prog_info(&node->info_linear->info,
+                                              env, fp);
+       }
+
+       up_read(&env->bpf_progs.lock);
+}
+
+static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
+{
+       struct perf_env *env = &ff->ph->env;
+       struct rb_root *root;
+       struct rb_node *next;
+
+       down_read(&env->bpf_progs.lock);
+
+       root = &env->bpf_progs.btfs;
+       next = rb_first(root);
+
+       while (next) {
+               struct btf_node *node;
+
+               node = rb_entry(next, struct btf_node, rb_node);
+               next = rb_next(&node->rb_node);
+               fprintf(fp, "# btf info of id %u\n", node->id);
+       }
+
+       up_read(&env->bpf_progs.lock);
+}
+
 static void free_event_desc(struct perf_evsel *events)
 {
        struct perf_evsel *evsel;
@@ -2373,6 +2530,143 @@ static int process_clockid(struct feat_fd *ff,
        return 0;
 }
 
+static int process_dir_format(struct feat_fd *ff,
+                             void *_data __maybe_unused)
+{
+       struct perf_session *session;
+       struct perf_data *data;
+
+       session = container_of(ff->ph, struct perf_session, header);
+       data = session->data;
+
+       if (WARN_ON(!perf_data__is_dir(data)))
+               return -1;
+
+       return do_read_u64(ff, &data->dir.version);
+}
+
+#ifdef HAVE_LIBBPF_SUPPORT
+static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
+{
+       struct bpf_prog_info_linear *info_linear;
+       struct bpf_prog_info_node *info_node;
+       struct perf_env *env = &ff->ph->env;
+       u32 count, i;
+       int err = -1;
+
+       if (ff->ph->needs_swap) {
+               pr_warning("interpreting bpf_prog_info from systems with endianity is not yet supported\n");
+               return 0;
+       }
+
+       if (do_read_u32(ff, &count))
+               return -1;
+
+       down_write(&env->bpf_progs.lock);
+
+       for (i = 0; i < count; ++i) {
+               u32 info_len, data_len;
+
+               info_linear = NULL;
+               info_node = NULL;
+               if (do_read_u32(ff, &info_len))
+                       goto out;
+               if (do_read_u32(ff, &data_len))
+                       goto out;
+
+               if (info_len > sizeof(struct bpf_prog_info)) {
+                       pr_warning("detected invalid bpf_prog_info\n");
+                       goto out;
+               }
+
+               info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
+                                    data_len);
+               if (!info_linear)
+                       goto out;
+               info_linear->info_len = sizeof(struct bpf_prog_info);
+               info_linear->data_len = data_len;
+               if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
+                       goto out;
+               if (__do_read(ff, &info_linear->info, info_len))
+                       goto out;
+               if (info_len < sizeof(struct bpf_prog_info))
+                       memset(((void *)(&info_linear->info)) + info_len, 0,
+                              sizeof(struct bpf_prog_info) - info_len);
+
+               if (__do_read(ff, info_linear->data, data_len))
+                       goto out;
+
+               info_node = malloc(sizeof(struct bpf_prog_info_node));
+               if (!info_node)
+                       goto out;
+
+               /* after reading from file, translate offset to address */
+               bpf_program__bpil_offs_to_addr(info_linear);
+               info_node->info_linear = info_linear;
+               perf_env__insert_bpf_prog_info(env, info_node);
+       }
+
+       up_write(&env->bpf_progs.lock);
+       return 0;
+out:
+       free(info_linear);
+       free(info_node);
+       up_write(&env->bpf_progs.lock);
+       return err;
+}
+#else // HAVE_LIBBPF_SUPPORT
+static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
+{
+       return 0;
+}
+#endif // HAVE_LIBBPF_SUPPORT
+
+static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
+{
+       struct perf_env *env = &ff->ph->env;
+       struct btf_node *node = NULL;
+       u32 count, i;
+       int err = -1;
+
+       if (ff->ph->needs_swap) {
+               pr_warning("interpreting btf from systems with endianity is not yet supported\n");
+               return 0;
+       }
+
+       if (do_read_u32(ff, &count))
+               return -1;
+
+       down_write(&env->bpf_progs.lock);
+
+       for (i = 0; i < count; ++i) {
+               u32 id, data_size;
+
+               if (do_read_u32(ff, &id))
+                       goto out;
+               if (do_read_u32(ff, &data_size))
+                       goto out;
+
+               node = malloc(sizeof(struct btf_node) + data_size);
+               if (!node)
+                       goto out;
+
+               node->id = id;
+               node->data_size = data_size;
+
+               if (__do_read(ff, node->data, data_size))
+                       goto out;
+
+               perf_env__insert_btf(env, node);
+               node = NULL;
+       }
+
+       err = 0;
+out:
+       up_write(&env->bpf_progs.lock);
+       free(node);
+       return err;
+}
+
 struct feature_ops {
        int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
        void (*print)(struct feat_fd *ff, FILE *fp);
@@ -2432,7 +2726,10 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
        FEAT_OPN(CACHE,         cache,          true),
        FEAT_OPR(SAMPLE_TIME,   sample_time,    false),
        FEAT_OPR(MEM_TOPOLOGY,  mem_topology,   true),
-       FEAT_OPR(CLOCKID,       clockid,        false)
+       FEAT_OPR(CLOCKID,       clockid,        false),
+       FEAT_OPN(DIR_FORMAT,    dir_format,     false),
+       FEAT_OPR(BPF_PROG_INFO, bpf_prog_info,  false),
+       FEAT_OPR(BPF_BTF,       bpf_btf,        false),
 };
 
 struct header_print_data {
index 0d553ddca0a3049f941d96a0ae0d68b71ea7a49c..386da49e1bfa05f7ac546b26d1e05730d8e2f3c9 100644 (file)
@@ -39,6 +39,9 @@ enum {
        HEADER_SAMPLE_TIME,
        HEADER_MEM_TOPOLOGY,
        HEADER_CLOCKID,
+       HEADER_DIR_FORMAT,
+       HEADER_BPF_PROG_INFO,
+       HEADER_BPF_BTF,
        HEADER_LAST_FEATURE,
        HEADER_FEAT_BITS        = 256,
 };
@@ -48,6 +51,10 @@ enum perf_header_version {
        PERF_HEADER_VERSION_2,
 };
 
+enum perf_dir_version {
+       PERF_DIR_VERSION        = 1,
+};
+
 struct perf_file_section {
        u64 offset;
        u64 size;
index f9eb95bf3938b7248a1df567c04d1f31cf7e1f36..7ace7a10054d82da540825ad2eb36d5a85b0a6a4 100644 (file)
@@ -19,6 +19,7 @@
 #include <math.h>
 #include <inttypes.h>
 #include <sys/param.h>
+#include <linux/time64.h>
 
 static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
@@ -192,6 +193,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
+       hists__new_col_len(hists, HISTC_TIME, 12);
 
        if (h->srcline) {
                len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
@@ -246,6 +248,14 @@ static void he_stat__add_cpumode_period(struct he_stat *he_stat,
        }
 }
 
+static long hist_time(unsigned long htime)
+{
+       unsigned long time_quantum = symbol_conf.time_quantum;
+
+       if (time_quantum)
+               return (htime / time_quantum) * time_quantum;
+       return htime;
+}
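
hist_time() buckets a sample timestamp by rounding it down to a multiple of symbol_conf.time_quantum, so all samples in the same interval merge into one histogram entry. Worked example with an assumed 100ms quantum:

    /* symbol_conf.time_quantum = 100 * NSEC_PER_MSEC (assumed) */
    hist_time(1234567890);  /* -> 1200000000: the 1.2s bucket   */
    hist_time(1299999999);  /* -> 1200000000: same bucket       */
    hist_time(1300000000);  /* -> 1300000000: next bucket       */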
+
 static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
 {
@@ -426,6 +436,13 @@ static int hist_entry__init(struct hist_entry *he,
                        goto err_rawdata;
        }
 
+       if (symbol_conf.res_sample) {
+               he->res_samples = calloc(symbol_conf.res_sample,
+                                        sizeof(struct res_sample));
+               if (!he->res_samples)
+                       goto err_srcline;
+       }
+
        INIT_LIST_HEAD(&he->pairs.node);
        thread__get(he->thread);
        he->hroot_in  = RB_ROOT_CACHED;
@@ -436,6 +453,9 @@ static int hist_entry__init(struct hist_entry *he,
 
        return 0;
 
+err_srcline:
+       free(he->srcline);
+
 err_rawdata:
        free(he->raw_data);
 
@@ -593,6 +613,32 @@ out:
        return he;
 }
 
+static unsigned random_max(unsigned high)
+{
+       unsigned thresh = -high % high;
+
+       for (;;) {
+               unsigned r = random();
+               if (r >= thresh)
+                       return r % high;
+       }
+}
+
+static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
+{
+       struct res_sample *r;
+       int j;
+
+       if (he->num_res < symbol_conf.res_sample) {
+               j = he->num_res++;
+       } else {
+               j = random_max(symbol_conf.res_sample);
+       }
+       r = &he->res_samples[j];
+       r->time = sample->time;
+       r->cpu = sample->cpu;
+       r->tid = sample->tid;
+}
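
Two notes on the helpers above, hedged since the intent is not spelled out in the patch. random_max() removes modulo bias: in 32-bit unsigned arithmetic, -high % high equals 2^32 mod high, and rejecting raw values below that threshold leaves a range whose size divides high, so r % high is uniform (assuming the generator covers the full 32-bit range; glibc random() returns only 31 bits, so a small residual bias remains). hists__res_sample() keeps the first res_sample entries and then overwrites a uniformly chosen slot per new sample, a cheap variant of reservoir sampling that favors recent samples. Worked numbers for the threshold:

    /* high = 3: thresh = 2^32 % 3 = 1, so r == 0 is rejected and */
    /* the remaining 2^32 - 1 = 4294967295 values split evenly,   */
    /* 1431655765 per residue class -> r % 3 is unbiased.         */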
+
 static struct hist_entry*
 __hists__add_entry(struct hists *hists,
                   struct addr_location *al,
@@ -635,10 +681,13 @@ __hists__add_entry(struct hists *hists,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
                .ops = ops,
+               .time = hist_time(sample->time),
        }, *he = hists__findnew_entry(hists, &entry, al, sample_self);
 
        if (!hists->has_callchains && he && he->callchain_size != 0)
                hists->has_callchains = true;
+       if (he && symbol_conf.res_sample)
+               hists__res_sample(he, sample);
        return he;
 }
 
@@ -1062,8 +1111,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
 
        err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
                                        iter->evsel, al, max_stack_depth);
-       if (err)
+       if (err) {
+               map__put(alm);
                return err;
+       }
 
        err = iter->ops->prepare_entry(iter, al);
        if (err)
@@ -1162,6 +1213,7 @@ void hist_entry__delete(struct hist_entry *he)
                mem_info__zput(he->mem_info);
        }
 
+       zfree(&he->res_samples);
        zfree(&he->stat_acc);
        free_srcline(he->srcline);
        if (he->srcfile && he->srcfile[0])
index 4af27fbab24f5c3ca3ca0eda289537b506846f20..76ff6c6d03b82f334ea8aa3528d8d817f164a066 100644 (file)
@@ -31,6 +31,7 @@ enum hist_filter {
 
 enum hist_column {
        HISTC_SYMBOL,
+       HISTC_TIME,
        HISTC_DSO,
        HISTC_THREAD,
        HISTC_COMM,
@@ -432,9 +433,18 @@ struct hist_browser_timer {
 };
 
 struct annotation_options;
+struct res_sample;
+
+enum rstype {
+       A_NORMAL,
+       A_ASM,
+       A_SOURCE
+};
 
 #ifdef HAVE_SLANG_SUPPORT
 #include "../ui/keysyms.h"
+void attr_to_script(char *buf, struct perf_event_attr *attr);
+
 int map_symbol__tui_annotate(struct map_symbol *ms, struct perf_evsel *evsel,
                             struct hist_browser_timer *hbt,
                             struct annotation_options *annotation_opts);
@@ -449,7 +459,13 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
                                  struct perf_env *env,
                                  bool warn_lost_event,
                                  struct annotation_options *annotation_options);
-int script_browse(const char *script_opt);
+
+int script_browse(const char *script_opt, struct perf_evsel *evsel);
+
+void run_script(char *cmd);
+int res_sample_browse(struct res_sample *res_samples, int num_res,
+                     struct perf_evsel *evsel, enum rstype rstype);
+void res_sample_init(void);
 #else
 static inline
 int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused,
@@ -478,11 +494,22 @@ static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused,
        return 0;
 }
 
-static inline int script_browse(const char *script_opt __maybe_unused)
+static inline int script_browse(const char *script_opt __maybe_unused,
+                               struct perf_evsel *evsel __maybe_unused)
 {
        return 0;
 }
 
+static inline int res_sample_browse(struct res_sample *res_samples __maybe_unused,
+                                   int num_res __maybe_unused,
+                                   struct perf_evsel *evsel __maybe_unused,
+                                   enum rstype rstype __maybe_unused)
+{
+       return 0;
+}
+
+static inline void res_sample_init(void) {}
+
 #define K_LEFT  -1000
 #define K_RIGHT -2000
 #define K_SWITCH_INPUT_DATA -3000
index 6e03db142091badf132f7a19869c76180f1075c4..872fab163585ac9dcf6b42ab7d95e8cae651714b 100644 (file)
@@ -251,19 +251,15 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
                if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
                        decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
                                                decoder->tsc_ctc_ratio_d;
-
-               /*
-                * Allow for timestamps appearing to backwards because a TSC
-                * packet has slipped past a MTC packet, so allow 2 MTC ticks
-                * or ...
-                */
-               decoder->tsc_slip = multdiv(2 << decoder->mtc_shift,
-                                       decoder->tsc_ctc_ratio_n,
-                                       decoder->tsc_ctc_ratio_d);
        }
-       /* ... or 0x100 paranoia */
-       if (decoder->tsc_slip < 0x100)
-               decoder->tsc_slip = 0x100;
+
+       /*
+        * A TSC packet can slip past MTC packets so that the timestamp appears
+        * to go backwards. One estimate is that it can be up to about 40 CPU
+        * cycles, which is certainly less than 0x1000 TSC ticks, but accept
+        * slippage of an order of magnitude more to be on the safe side.
+        */
+       decoder->tsc_slip = 0x10000;
 
        intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
        intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
index 61959aba7e27008b68bd4cde372b15f1bcaefe39..3c520baa198cfcc49276ca77a8f8c12d058bec4d 100644 (file)
@@ -1421,6 +1421,20 @@ static void machine__set_kernel_mmap(struct machine *machine,
                machine->vmlinux_map->end = ~0ULL;
 }
 
+static void machine__update_kernel_mmap(struct machine *machine,
+                                    u64 start, u64 end)
+{
+       struct map *map = machine__kernel_map(machine);
+
+       map__get(map);
+       map_groups__remove(&machine->kmaps, map);
+
+       machine__set_kernel_mmap(machine, start, end);
+
+       map_groups__insert(&machine->kmaps, map);
+       map__put(map);
+}
+
 int machine__create_kernel_maps(struct machine *machine)
 {
        struct dso *kernel = machine__get_kernel(machine);
@@ -1453,17 +1467,11 @@ int machine__create_kernel_maps(struct machine *machine)
                        goto out_put;
                }
 
-               /* we have a real start address now, so re-order the kmaps */
-               map = machine__kernel_map(machine);
-
-               map__get(map);
-               map_groups__remove(&machine->kmaps, map);
-
-               /* assume it's the last in the kmaps */
-               machine__set_kernel_mmap(machine, addr, ~0ULL);
-
-               map_groups__insert(&machine->kmaps, map);
-               map__put(map);
+               /*
+                * We have a real start address now, so re-order the kmaps;
+                * assume it's the last in the kmaps.
+                */
+               machine__update_kernel_mmap(machine, addr, ~0ULL);
        }
 
        if (machine__create_extra_kernel_maps(machine, kernel))
@@ -1599,7 +1607,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
                if (strstr(kernel->long_name, "vmlinux"))
                        dso__set_short_name(kernel, "[kernel.vmlinux]", false);
 
-               machine__set_kernel_mmap(machine, event->mmap.start,
+               machine__update_kernel_mmap(machine, event->mmap.start,
                                         event->mmap.start + event->mmap.len);
 
                /*
index fbeb0c6efaa6e5ea05383e7a9227798b313379d4..ee71efb9db62e676c8fab2a60c34d079ba7e7e7e 100644 (file)
@@ -261,6 +261,22 @@ bool __map__is_extra_kernel_map(const struct map *map)
        return kmap && kmap->name[0];
 }
 
+bool __map__is_bpf_prog(const struct map *map)
+{
+       const char *name;
+
+       if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
+               return true;
+
+       /*
+        * If PERF_RECORD_BPF_EVENT is not included, the dso will not have
+        * the type DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can
+        * guess the type based on the name.
+        */
+       name = map->dso->short_name;
+       return name && (strstr(name, "bpf_prog_") == name);
+}
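
The fallback check uses the common "starts with" idiom: strstr() returns a pointer to the first match, so comparing it against the start of the string tests for a prefix at offset 0. Example names below; the bpf_prog_<tag>_<name> convention is how the kernel names BPF program symbols, and the values are illustrative:

    #include <assert.h>
    #include <string.h>

    int main(void)
    {
            const char *a = "bpf_prog_5f68a316b9e45ba9_foo";
            const char *b = "[ext4]";

            assert(strstr(a, "bpf_prog_") == a);     /* prefix match */
            assert(strstr(b, "bpf_prog_") == NULL);  /* no match     */
            return 0;
    }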
+
 bool map__has_symbols(const struct map *map)
 {
        return dso__has_symbols(map->dso);
@@ -577,10 +593,25 @@ static void __maps__purge(struct maps *maps)
        }
 }
 
+static void __maps__purge_names(struct maps *maps)
+{
+       struct rb_root *root = &maps->names;
+       struct rb_node *next = rb_first(root);
+
+       while (next) {
+               struct map *pos = rb_entry(next, struct map, rb_node_name);
+
+               next = rb_next(&pos->rb_node_name);
+               rb_erase_init(&pos->rb_node_name, root);
+               map__put(pos);
+       }
+}
+
 static void maps__exit(struct maps *maps)
 {
        down_write(&maps->lock);
        __maps__purge(maps);
+       __maps__purge_names(maps);
        up_write(&maps->lock);
 }
 
@@ -895,10 +926,8 @@ static void __maps__insert_name(struct maps *maps, struct map *map)
                rc = strcmp(m->dso->short_name, map->dso->short_name);
                if (rc < 0)
                        p = &(*p)->rb_left;
-               else if (rc  > 0)
-                       p = &(*p)->rb_right;
                else
-                       return;
+                       p = &(*p)->rb_right;
        }
        rb_link_node(&map->rb_node_name, parent, p);
        rb_insert_color(&map->rb_node_name, &maps->names);
@@ -917,6 +946,9 @@ static void __maps__remove(struct maps *maps, struct map *map)
 {
        rb_erase_init(&map->rb_node, &maps->entries);
        map__put(map);
+
+       rb_erase_init(&map->rb_node_name, &maps->names);
+       map__put(map);
 }
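
After this change a map held by a maps struct carries two references, one per rb-tree, so both removal (above) and teardown (__maps__purge_names() earlier in this file) must drop both. A sketch of the reference flow, hedged because the insert-side gets are not all visible in this hunk:

    /* __maps__insert()       +1 ref for the entries tree          */
    /* __maps__insert_name()  +1 ref for the names tree (assumed)  */
    /* __maps__remove()       -1 ref per tree (the two puts above) */
    /* __maps__purge_names()  -1 ref per node at maps__exit()      */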
 
 void maps__remove(struct maps *maps, struct map *map)
index 0e20749f2c55d533842171dd2b2a7262f02768d1..dc93787c74f01b65fa7fcc76388a57472db707e7 100644 (file)
@@ -159,10 +159,12 @@ int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name,
 
 bool __map__is_kernel(const struct map *map);
 bool __map__is_extra_kernel_map(const struct map *map);
+bool __map__is_bpf_prog(const struct map *map);
 
 static inline bool __map__is_kmodule(const struct map *map)
 {
-       return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map);
+       return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map) &&
+              !__map__is_bpf_prog(map);
 }
 
 bool map__has_symbols(const struct map *map);
index cdc7740fc18197a9114bea47564dd546ed4b4069..ef3d79b2c90b33c6eed9030c36873fb36a5cf653 100644 (file)
@@ -440,6 +440,8 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
 
        perf_mmap__setup_affinity_mask(map, mp);
 
+       map->flush = mp->flush;
+
        if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
                                &mp->auxtrace_mp, map->base, fd))
                return -1;
@@ -492,7 +494,7 @@ static int __perf_mmap__read_init(struct perf_mmap *md)
        md->start = md->overwrite ? head : old;
        md->end = md->overwrite ? old : head;
 
-       if (md->start == md->end)
+       if ((md->end - md->start) < md->flush)
                return -EAGAIN;
 
        size = md->end - md->start;
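
With the new per-mmap flush threshold, __perf_mmap__read_init() keeps returning -EAGAIN until at least md->flush bytes are pending, batching consumption instead of draining the ring on every wakeup. Note that with flush == 1 the unsigned comparison degenerates to the old emptiness test:

    /* flush == 1:  (end - start) < 1  <=>  end == start            */
    /* flush == 4096: a 1 KiB backlog still yields -EAGAIN; reading */
    /* resumes once >= 4 KiB have accumulated (values illustrative) */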
index e566c19b242b61d4490e2899f11ac38ac971ce5b..b82f8c2d55c475caefac428b2c1f13724c1160cd 100644 (file)
@@ -39,6 +39,7 @@ struct perf_mmap {
        } aio;
 #endif
        cpu_set_t       affinity_mask;
+       u64             flush;
 };
 
 /*
@@ -70,7 +71,7 @@ enum bkw_mmap_state {
 };
 
 struct mmap_params {
-       int                         prot, mask, nr_cblocks, affinity;
+       int                         prot, mask, nr_cblocks, affinity, flush;
        struct auxtrace_mmap_params auxtrace_mp;
 };
 
index ea523d3b248fe9bdfb2ded5bca9f88bc966b31b1..989fed6f43b5a5f34d485c39b7b8fa30790e6a2d 100644 (file)
@@ -270,6 +270,8 @@ static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
                "FINAL",
                "ROUND",
                "HALF ",
+               "TOP  ",
+               "TIME ",
        };
        int err;
        bool show_progress = false;
index 4dcc01b2532c842dddc720d31b528f6be5532c02..4432bfe039fd99668447bd2f407f5fd631f16855 100644 (file)
@@ -317,10 +317,12 @@ static struct perf_evsel *
 __add_event(struct list_head *list, int *idx,
            struct perf_event_attr *attr,
            char *name, struct perf_pmu *pmu,
-           struct list_head *config_terms, bool auto_merge_stats)
+           struct list_head *config_terms, bool auto_merge_stats,
+           const char *cpu_list)
 {
        struct perf_evsel *evsel;
-       struct cpu_map *cpus = pmu ? pmu->cpus : NULL;
+       struct cpu_map *cpus = pmu ? pmu->cpus :
+                              cpu_list ? cpu_map__new(cpu_list) : NULL;
 
        event_attr_init(attr);
 
@@ -348,7 +350,25 @@ static int add_event(struct list_head *list, int *idx,
                     struct perf_event_attr *attr, char *name,
                     struct list_head *config_terms)
 {
-       return __add_event(list, idx, attr, name, NULL, config_terms, false) ? 0 : -ENOMEM;
+       return __add_event(list, idx, attr, name, NULL, config_terms, false, NULL) ? 0 : -ENOMEM;
+}
+
+static int add_event_tool(struct list_head *list, int *idx,
+                         enum perf_tool_event tool_event)
+{
+       struct perf_evsel *evsel;
+       struct perf_event_attr attr = {
+               .type = PERF_TYPE_SOFTWARE,
+               .config = PERF_COUNT_SW_DUMMY,
+       };
+
+       evsel = __add_event(list, idx, &attr, NULL, NULL, NULL, false, "0");
+       if (!evsel)
+               return -ENOMEM;
+       evsel->tool_event = tool_event;
+       if (tool_event == PERF_TOOL_DURATION_TIME)
+               evsel->unit = strdup("ns");
+       return 0;
 }
 
 static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size)
@@ -1233,6 +1253,13 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
                         get_config_name(head_config), &config_terms);
 }
 
+int parse_events_add_tool(struct parse_events_state *parse_state,
+                         struct list_head *list,
+                         enum perf_tool_event tool_event)
+{
+       return add_event_tool(list, &parse_state->idx, tool_event);
+}
+
 int parse_events_add_pmu(struct parse_events_state *parse_state,
                         struct list_head *list, char *name,
                         struct list_head *head_config,
@@ -1267,7 +1294,8 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
 
        if (!head_config) {
                attr.type = pmu->type;
-               evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats);
+               evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL,
+                                   auto_merge_stats, NULL);
                if (evsel) {
                        evsel->pmu_name = name;
                        evsel->use_uncore_alias = use_uncore_alias;
@@ -1295,7 +1323,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
 
        evsel = __add_event(list, &parse_state->idx, &attr,
                            get_config_name(head_config), pmu,
-                           &config_terms, auto_merge_stats);
+                           &config_terms, auto_merge_stats, NULL);
        if (evsel) {
                evsel->unit = info.unit;
                evsel->scale = info.scale;
@@ -2271,6 +2299,7 @@ static bool is_event_supported(u8 type, unsigned config)
                perf_evsel__delete(evsel);
        }
 
+       thread_map__put(tmap);
        return ret;
 }
 
@@ -2341,6 +2370,7 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
                                printf("  %-50s [%s]\n", buf, "SDT event");
                                free(buf);
                        }
+                       free(path);
                } else
                        printf("  %-50s [%s]\n", nd->s, "SDT event");
                if (nd2) {
@@ -2427,6 +2457,25 @@ out_enomem:
        return evt_num;
 }
 
+static void print_tool_event(const char *name, const char *event_glob,
+                            bool name_only)
+{
+       if (event_glob && !strglobmatch(name, event_glob))
+               return;
+       if (name_only)
+               printf("%s ", name);
+       else
+               printf("  %-50s [%s]\n", name, "Tool event");
+}
+
+void print_tool_events(const char *event_glob, bool name_only)
+{
+       print_tool_event("duration_time", event_glob, name_only);
+       if (pager_in_use())
+               printf("\n");
+}
+
 void print_symbol_events(const char *event_glob, unsigned type,
                                struct event_symbol *syms, unsigned max,
                                bool name_only)
@@ -2510,6 +2559,7 @@ void print_events(const char *event_glob, bool name_only, bool quiet_flag,
 
        print_symbol_events(event_glob, PERF_TYPE_SOFTWARE,
                            event_symbols_sw, PERF_COUNT_SW_MAX, name_only);
+       print_tool_events(event_glob, name_only);
 
        print_hwcache_events(event_glob, name_only);
 
index 5ed035cbcbb72dcbcf5c73d39be2248c099e7452..a052cd6ac63e4ed2c1aa843a8cb64dba6831e0f0 100644 (file)
@@ -160,6 +160,10 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
                             struct list_head *list,
                             u32 type, u64 config,
                             struct list_head *head_config);
+enum perf_tool_event;
+int parse_events_add_tool(struct parse_events_state *parse_state,
+                         struct list_head *list,
+                         enum perf_tool_event tool_event);
 int parse_events_add_cache(struct list_head *list, int *idx,
                           char *type, char *op_result1, char *op_result2,
                           struct parse_events_error *error,
@@ -200,6 +204,7 @@ extern struct event_symbol event_symbols_sw[];
 void print_symbol_events(const char *event_glob, unsigned type,
                                struct event_symbol *syms, unsigned max,
                                bool name_only);
+void print_tool_events(const char *event_glob, bool name_only);
 void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
                             bool name_only);
 int print_hwcache_events(const char *event_glob, bool name_only);
index 7805c71aaae2e53dbc74c072b4e5eb2a73e6c23a..c54bfe88626c169e45e9c765d1dff0d53530bcff 100644 (file)
@@ -15,6 +15,7 @@
 #include "../perf.h"
 #include "parse-events.h"
 #include "parse-events-bison.h"
+#include "evsel.h"
 
 char *parse_events_get_text(yyscan_t yyscanner);
 YYSTYPE *parse_events_get_lval(yyscan_t yyscanner);
@@ -154,6 +155,14 @@ static int sym(yyscan_t scanner, int type, int config)
        return type == PERF_TYPE_HARDWARE ? PE_VALUE_SYM_HW : PE_VALUE_SYM_SW;
 }
 
+static int tool(yyscan_t scanner, enum perf_tool_event event)
+{
+       YYSTYPE *yylval = parse_events_get_lval(scanner);
+
+       yylval->num = event;
+       return PE_VALUE_SYM_TOOL;
+}
+
 static int term(yyscan_t scanner, int type)
 {
        YYSTYPE *yylval = parse_events_get_lval(scanner);
@@ -322,7 +331,7 @@ cpu-migrations|migrations                   { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COU
 alignment-faults                               { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
 emulation-faults                               { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
 dummy                                          { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
-duration_time                                  { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
+duration_time                                  { return tool(yyscanner, PERF_TOOL_DURATION_TIME); }
 bpf-output                                     { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_BPF_OUTPUT); }
 
        /*
index 44819bdb037dabbd820f3ba13988ffd7dbf1343e..6ad8d4914969b20449f883eceab5f9b5109cd380 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include "util.h"
 #include "pmu.h"
+#include "evsel.h"
 #include "debug.h"
 #include "parse-events.h"
 #include "parse-events-bison.h"
@@ -45,6 +46,7 @@ static void inc_group_count(struct list_head *list,
 
 %token PE_START_EVENTS PE_START_TERMS
 %token PE_VALUE PE_VALUE_SYM_HW PE_VALUE_SYM_SW PE_RAW PE_TERM
+%token PE_VALUE_SYM_TOOL
 %token PE_EVENT_NAME
 %token PE_NAME
 %token PE_BPF_OBJECT PE_BPF_SOURCE
@@ -58,6 +60,7 @@ static void inc_group_count(struct list_head *list,
 %type <num> PE_VALUE
 %type <num> PE_VALUE_SYM_HW
 %type <num> PE_VALUE_SYM_SW
+%type <num> PE_VALUE_SYM_TOOL
 %type <num> PE_RAW
 %type <num> PE_TERM
 %type <str> PE_NAME
@@ -321,6 +324,15 @@ value_sym sep_slash_slash_dc
        ABORT_ON(parse_events_add_numeric(_parse_state, list, type, config, NULL));
        $$ = list;
 }
+|
+PE_VALUE_SYM_TOOL sep_slash_slash_dc
+{
+       struct list_head *list;
+
+       ALLOC_LIST(list);
+       ABORT_ON(parse_events_add_tool(_parse_state, list, $1));
+       $$ = list;
+}
 
 event_legacy_cache:
 PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT opt_event_config
index 6199a3174ab95eccd1c135dc7d477161296f9883..e0429f4ef335896a9ad393f1e3e3f30b4a4acc9f 100644 (file)
@@ -732,10 +732,20 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
 
                if (!is_arm_pmu_core(name)) {
                        pname = pe->pmu ? pe->pmu : "cpu";
+
+                       /*
+                        * An uncore alias may come from a different PMU
+                        * with a common prefix.
+                        */
+                       if (pmu_is_uncore(name) &&
+                           !strncmp(pname, name, strlen(pname)))
+                               goto new_alias;
+
                        if (strcmp(pname, name))
                                continue;
                }
 
+new_alias:
                /* need type casts to override 'const' */
                __perf_pmu__new_alias(head, NULL, (char *)pe->name,
                                (char *)pe->desc, (char *)pe->event,
index a1b8d9649ca737abf09c24934cb8af8b78821e82..198e09ff611e48cecce9ae3795f0a7c8a63c2c27 100644 (file)
@@ -160,8 +160,10 @@ static struct map *kernel_get_module_map(const char *module)
        if (module && strchr(module, '/'))
                return dso__new_map(module);
 
-       if (!module)
-               module = "kernel";
+       if (!module) {
+               pos = machine__kernel_map(host_machine);
+               return map__get(pos);
+       }
 
        for (pos = maps__first(maps); pos; pos = map__next(pos)) {
                /* short_name is "[module]" */
index dda0ac978b1eb2371d9feebc64154d5cf9bb949e..6aa7e2352e16e118097ca128cedb69976cb7205c 100644 (file)
@@ -342,7 +342,7 @@ static bool is_tracepoint(struct pyrf_event *pevent)
 static PyObject*
 tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
 {
-       struct tep_handle *pevent = field->event->pevent;
+       struct tep_handle *pevent = field->event->tep;
        void *data = pe->sample.raw_data;
        PyObject *ret = NULL;
        unsigned long long val;
index 5f06378a482b80b53459509b4a141a1b1234253f..61aa7f3df915b80d0e73f06b3ce0ba685ace594e 100644 (file)
@@ -372,7 +372,7 @@ static void perl_process_tracepoint(struct perf_sample *sample,
        ns = nsecs - s * NSEC_PER_SEC;
 
        scripting_context->event_data = data;
-       scripting_context->pevent = evsel->tp_format->pevent;
+       scripting_context->pevent = evsel->tp_format->tep;
 
        ENTER;
        SAVETMPS;
index 09604c6508f040098eec7dbab6164afb5bc42768..22f52b6698719ed53b883071055182f5a53776b3 100644 (file)
@@ -837,7 +837,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
        ns = nsecs - s * NSEC_PER_SEC;
 
        scripting_context->event_data = data;
-       scripting_context->pevent = evsel->tp_format->pevent;
+       scripting_context->pevent = evsel->tp_format->tep;
 
        context = _PyCapsule_New(scripting_context, NULL, NULL);
 
index db643f3c2b9544d5bc12ce439b1a86817de6af5d..bad5f87ae001b06427f04aed3db5b6501ab90b31 100644 (file)
@@ -132,6 +132,7 @@ struct perf_session *perf_session__new(struct perf_data *data,
        ordered_events__init(&session->ordered_events,
                             ordered_events__deliver_event, NULL);
 
+       perf_env__init(&session->header.env);
        if (data) {
                if (perf_data__open(data))
                        goto out_delete;
@@ -152,6 +153,10 @@ struct perf_session *perf_session__new(struct perf_data *data,
                        }
 
                        perf_evlist__init_trace_event_sample_raw(session->evlist);
+
+                       /* Open the directory data. */
+                       if (data->is_dir && perf_data__open_dir(data))
+                               goto out_delete;
                }
        } else  {
                session->machines.host.env = &perf_env;
@@ -1843,10 +1848,17 @@ fetch_mmaped_event(struct perf_session *session,
 #define NUM_MMAPS 128
 #endif
 
+struct reader;
+
+typedef s64 (*reader_cb_t)(struct perf_session *session,
+                          union perf_event *event,
+                          u64 file_offset);
+
 struct reader {
-       int     fd;
-       u64     data_size;
-       u64     data_offset;
+       int              fd;
+       u64              data_size;
+       u64              data_offset;
+       reader_cb_t      process;
 };
 
 static int
@@ -1916,12 +1928,14 @@ more:
 
        size = event->header.size;
 
+       skip = -EINVAL;
+
        if (size < sizeof(struct perf_event_header) ||
-           (skip = perf_session__process_event(session, event, file_pos)) < 0) {
-               pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
+           (skip = rd->process(session, event, file_pos)) < 0) {
+               pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
                       file_offset + head, event->header.size,
-                      event->header.type);
-               err = -EINVAL;
+                      event->header.type, strerror(-skip));
+               err = skip;
                goto out;
        }
 
@@ -1943,12 +1957,20 @@ out:
        return err;
 }
 
+static s64 process_simple(struct perf_session *session,
+                         union perf_event *event,
+                         u64 file_offset)
+{
+       return perf_session__process_event(session, event, file_offset);
+}
+
 static int __perf_session__process_events(struct perf_session *session)
 {
        struct reader rd = {
                .fd             = perf_data__fd(session->data),
                .data_size      = session->header.data_size,
                .data_offset    = session->header.data_offset,
+               .process        = process_simple,
        };
        struct ordered_events *oe = &session->ordered_events;
        struct perf_tool *tool = session->tool;
index d2299e912e591ac569ef62d2b034421798ca50d1..5d2518e89fc49f21ac6a85c69283c07dc05042ef 100644 (file)
@@ -3,6 +3,7 @@
 #include <inttypes.h>
 #include <regex.h>
 #include <linux/mman.h>
+#include <linux/time64.h>
 #include "sort.h"
 #include "hist.h"
 #include "comm.h"
 #include "evsel.h"
 #include "evlist.h"
 #include "strlist.h"
+#include "strbuf.h"
 #include <traceevent/event-parse.h>
 #include "mem-events.h"
 #include "annotate.h"
+#include "time-utils.h"
 #include <linux/kernel.h>
 
 regex_t                parent_regex;
@@ -654,6 +657,42 @@ struct sort_entry sort_socket = {
        .se_width_idx   = HISTC_SOCKET,
 };
 
+/* --sort time */
+
+static int64_t
+sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+       return right->time - left->time;
+}
+
+static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
+                                   size_t size, unsigned int width)
+{
+       unsigned long secs;
+       unsigned long long nsecs;
+       char he_time[32];
+
+       nsecs = he->time;
+       secs = nsecs / NSEC_PER_SEC;
+       nsecs -= secs * NSEC_PER_SEC;
+
+       if (symbol_conf.nanosecs)
+               snprintf(he_time, sizeof(he_time), "%5lu.%09llu: ",
+                        secs, nsecs);
+       else
+               timestamp__scnprintf_usec(he->time, he_time,
+                                         sizeof(he_time));
+
+       return repsep_snprintf(bf, size, "%-.*s", width, he_time);
+}
+
+struct sort_entry sort_time = {
+       .se_header      = "Time",
+       .se_cmp         = sort__time_cmp,
+       .se_snprintf    = hist_entry__time_snprintf,
+       .se_width_idx   = HISTC_TIME,
+};
+
 /* --sort trace */
 
 static char *get_trace_output(struct hist_entry *he)
@@ -1634,6 +1673,7 @@ static struct sort_dimension common_sort_dimensions[] = {
        DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
        DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
        DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
+       DIM(SORT_TIME, "time", sort_time),
 };
 
 #undef DIM
@@ -3068,3 +3108,54 @@ void reset_output_field(void)
        reset_dimensions();
        perf_hpp__reset_output_field(&perf_hpp_list);
 }
+
+#define INDENT (3*8 + 1)
+
+static void add_key(struct strbuf *sb, const char *str, int *llen)
+{
+       if (*llen >= 75) {
+               strbuf_addstr(sb, "\n\t\t\t ");
+               *llen = INDENT;
+       }
+       strbuf_addf(sb, " %s", str);
+       *llen += strlen(str) + 1;
+}
+
+static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
+                           int *llen)
+{
+       int i;
+
+       for (i = 0; i < n; i++)
+               add_key(sb, s[i].name, llen);
+}
+
+static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
+                               int *llen)
+{
+       int i;
+
+       for (i = 0; i < n; i++)
+               add_key(sb, s[i].name, llen);
+}
+
+const char *sort_help(const char *prefix)
+{
+       struct strbuf sb;
+       char *s;
+       int len = strlen(prefix) + INDENT;
+
+       strbuf_init(&sb, 300);
+       strbuf_addstr(&sb, prefix);
+       add_hpp_sort_string(&sb, hpp_sort_dimensions,
+                           ARRAY_SIZE(hpp_sort_dimensions), &len);
+       add_sort_string(&sb, common_sort_dimensions,
+                           ARRAY_SIZE(common_sort_dimensions), &len);
+       add_sort_string(&sb, bstack_sort_dimensions,
+                           ARRAY_SIZE(bstack_sort_dimensions), &len);
+       add_sort_string(&sb, memory_sort_dimensions,
+                           ARRAY_SIZE(memory_sort_dimensions), &len);
+       s = strbuf_detach(&sb, NULL);
+       strbuf_release(&sb);
+       return s;
+}
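
sort_help() turns the sort-dimension tables into one wrapped help string, breaking near column 75 and indenting continuation lines to the option help column (INDENT = 3*8 + 1). A hedged usage sketch; the expected caller is a --sort option help text:

    /* illustrative caller; the returned buffer is handed to the
     * option table and intentionally never freed */
    const char *help = sort_help("sort by key(s): ");

strbuf_detach() hands ownership of the buffer to the caller, so the following strbuf_release() only tears down the already-emptied strbuf.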
index 2fbee0b1011c6a68d4cafefe9d0fc13d67f9c226..ce376a73f964dc83dc9a222b212cfcf3bad82aff 100644 (file)
@@ -47,6 +47,12 @@ extern struct sort_entry sort_srcline;
 extern enum sort_type sort__first_dimension;
 extern const char default_mem_sort_order[];
 
+struct res_sample {
+       u64 time;
+       int cpu;
+       int tid;
+};
+
 struct he_stat {
        u64                     period;
        u64                     period_sys;
@@ -135,10 +141,13 @@ struct hist_entry {
        char                    *srcfile;
        struct symbol           *parent;
        struct branch_info      *branch_info;
+       long                    time;
        struct hists            *hists;
        struct mem_info         *mem_info;
        void                    *raw_data;
        u32                     raw_size;
+       int                     num_res;
+       struct res_sample       *res_samples;
        void                    *trace_output;
        struct perf_hpp_list    *hpp_list;
        struct hist_entry       *parent_he;
@@ -231,6 +240,7 @@ enum sort_type {
        SORT_DSO_SIZE,
        SORT_CGROUP_ID,
        SORT_SYM_IPC_NULL,
+       SORT_TIME,
 
        /* branch stack specific sort keys */
        __SORT_BRANCH_STACK,
@@ -286,6 +296,8 @@ void reset_output_field(void);
 void sort__setup_elide(FILE *fp);
 void perf_hpp__set_elide(int idx, bool elide);
 
+const char *sort_help(const char *prefix);
+
 int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);
 
 bool is_strict_order(const char *order);
index 6d043c78f3c20578e864df0a4313eda399340e46..3324f23c7efcff6aeecb35700da8c9689bbfc216 100644 (file)
 #define CNTR_NOT_SUPPORTED     "<not supported>"
 #define CNTR_NOT_COUNTED       "<not counted>"
 
-static bool is_duration_time(struct perf_evsel *evsel)
-{
-       return !strcmp(evsel->name, "duration_time");
-}
-
 static void print_running(struct perf_stat_config *config,
                          u64 run, u64 ena)
 {
@@ -628,9 +623,6 @@ static void print_aggr(struct perf_stat_config *config,
                ad.id = id = config->aggr_map->map[s];
                first = true;
                evlist__for_each_entry(evlist, counter) {
-                       if (is_duration_time(counter))
-                               continue;
-
                        ad.val = ad.ena = ad.run = 0;
                        ad.nr = 0;
                        if (!collect_data(config, counter, aggr_cb, &ad))
@@ -848,8 +840,6 @@ static void print_no_aggr_metric(struct perf_stat_config *config,
                if (prefix)
                        fputs(prefix, config->output);
                evlist__for_each_entry(evlist, counter) {
-                       if (is_duration_time(counter))
-                               continue;
                        if (first) {
                                aggr_printout(config, counter, cpu, 0);
                                first = false;
@@ -906,8 +896,6 @@ static void print_metric_headers(struct perf_stat_config *config,
 
        /* Print metrics headers only */
        evlist__for_each_entry(evlist, counter) {
-               if (is_duration_time(counter))
-                       continue;
                os.evsel = counter;
                out.ctx = &os;
                out.print_metric = print_metric_header;
@@ -1136,15 +1124,11 @@ perf_evlist__print_counters(struct perf_evlist *evlist,
                break;
        case AGGR_THREAD:
                evlist__for_each_entry(evlist, counter) {
-                       if (is_duration_time(counter))
-                               continue;
                        print_aggr_thread(config, _target, counter, prefix);
                }
                break;
        case AGGR_GLOBAL:
                evlist__for_each_entry(evlist, counter) {
-                       if (is_duration_time(counter))
-                               continue;
                        print_counter_aggr(config, counter, prefix);
                }
                if (metric_only)
@@ -1155,8 +1139,6 @@ perf_evlist__print_counters(struct perf_evlist *evlist,
                        print_no_aggr_metric(config, evlist, prefix);
                else {
                        evlist__for_each_entry(evlist, counter) {
-                               if (is_duration_time(counter))
-                                       continue;
                                print_counter(config, counter, prefix);
                        }
                }
index 4d40515307b8024a4b438edfc8a7d1f2bfc97c9a..2856cc9d5a31e8aa55a631aa78b7875a12bd2c5b 100644 (file)
@@ -291,10 +291,8 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
                break;
        case AGGR_GLOBAL:
                aggr->val += count->val;
-               if (config->scale) {
-                       aggr->ena += count->ena;
-                       aggr->run += count->run;
-               }
+               aggr->ena += count->ena;
+               aggr->run += count->run;
        case AGGR_UNSET:
        default:
                break;
@@ -442,10 +440,8 @@ int create_perf_stat_counter(struct perf_evsel *evsel,
        struct perf_event_attr *attr = &evsel->attr;
        struct perf_evsel *leader = evsel->leader;
 
-       if (config->scale) {
-               attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
-                                   PERF_FORMAT_TOTAL_TIME_RUNNING;
-       }
+       attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+                           PERF_FORMAT_TOTAL_TIME_RUNNING;
 
        /*
         * The event is part of non trivial group, let's enable
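
With the config->scale check gone, counters are always opened with PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING in read_format, so the enabled/running times needed for multiplexing correction are always collected. A sketch of the usual scaling arithmetic those two fields feed (helper name is illustrative):

        /* Scale a raw count up to the full interval when the event was
         * time-multiplexed, i.e. run < ena. Sketch only. */
        static unsigned long long scale_count(unsigned long long val,
                                              unsigned long long ena,
                                              unsigned long long run)
        {
                /* If the event never ran there is nothing to scale. */
                if (!run || !ena)
                        return 0;
                return (unsigned long long)((double)val * ena / run);
        }
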
index 758bf5f74e6ee91a7ff09ac667bf6ac6d18a9d73..5cbad55cd99dfbee9729b64331ea384da46cb2a8 100644 (file)
@@ -6,6 +6,7 @@
 #include <string.h>
 #include <linux/kernel.h>
 #include <linux/mman.h>
+#include <linux/time64.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <sys/param.h>
@@ -39,15 +40,18 @@ int vmlinux_path__nr_entries;
 char **vmlinux_path;
 
 struct symbol_conf symbol_conf = {
+       .nanosecs               = false,
        .use_modules            = true,
        .try_vmlinux_path       = true,
        .demangle               = true,
        .demangle_kernel        = false,
        .cumulate_callchain     = true,
+       .time_quantum           = 100 * NSEC_PER_MSEC, /* 100ms */
        .show_hist_headers      = true,
        .symfs                  = "",
        .event_group            = true,
        .inline_name            = true,
+       .res_sample             = 0,
 };
 
 static enum dso_binary_type binary_type_symtab[] = {
@@ -1451,6 +1455,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
        case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
                return true;
 
+       case DSO_BINARY_TYPE__BPF_PROG_INFO:
        case DSO_BINARY_TYPE__NOT_FOUND:
        default:
                return false;
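
The new time_quantum default of 100 ms is the bucket width applied when histogram entries carry the new hist_entry->time key: sample timestamps are rounded down to the start of their quantum so entries group per time slice. A sketch of that quantization, assuming a helper along these lines:

        static u64 hist_time(u64 timestamp)
        {
                /* Round down to the start of the enclosing quantum. */
                return timestamp - (timestamp % symbol_conf.time_quantum);
        }
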
index fffea68c12035da068cb6a121867cab65596e1b2..6c55fa6fccec151fce220bf0d0489a6e3dfca606 100644 (file)
@@ -8,6 +8,7 @@ struct strlist;
 struct intlist;
 
 struct symbol_conf {
+       bool            nanosecs;
        unsigned short  priv_size;
        bool            try_vmlinux_path,
                        init_annotation,
@@ -55,6 +56,7 @@ struct symbol_conf {
                        *sym_list_str,
                        *col_width_list_str,
                        *bt_stop_list_str;
+       unsigned long   time_quantum;
        struct strlist  *dso_list,
                        *comm_list,
                        *sym_list,
@@ -66,6 +68,7 @@ struct symbol_conf {
        struct intlist  *pid_list,
                        *tid_list;
        const char      *symfs;
+       int             res_sample;
 };
 
 extern struct symbol_conf symbol_conf;
index 0f53baec660e2494db6f3e506ddff91c05d89c36..20663a460df34d63d9d575ebea8a7e7681099bc7 100644 (file)
@@ -453,6 +453,14 @@ int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz)
        return scnprintf(buf, sz, "%"PRIu64".%06"PRIu64, sec, usec);
 }
 
+int timestamp__scnprintf_nsec(u64 timestamp, char *buf, size_t sz)
+{
+       u64 sec  = timestamp / NSEC_PER_SEC,
+           nsec = timestamp % NSEC_PER_SEC;
+
+       return scnprintf(buf, sz, "%" PRIu64 ".%09" PRIu64, sec, nsec);
+}
+
 int fetch_current_timestamp(char *buf, size_t sz)
 {
        struct timeval tv;
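
timestamp__scnprintf_nsec() mirrors the existing usec variant but keeps all nine fractional digits. A worked example with an arbitrary timestamp:

        char buf[32];

        timestamp__scnprintf_nsec(1234567890123ULL, buf, sizeof(buf));
        /* buf now contains "1234.567890123": 1234567890123 ns is
         * 1234 s with a 567890123 ns remainder. */
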
index b923de44e36f9b350c9b94c021ccee732bc5f7df..72a42ea1d513af55f54896da02ce66bc6d037a82 100644 (file)
@@ -30,6 +30,7 @@ int perf_time__parse_for_ranges(const char *str, struct perf_session *session,
                                int *range_size, int *range_num);
 
 int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz);
+int timestamp__scnprintf_nsec(u64 timestamp, char *buf, size_t sz);
 
 int fetch_current_timestamp(char *buf, size_t sz);
 
index ad74be1f0e4208ab04e9425464f1a977b82db02f..863955e4094e2af752e28f86eb72142f8f36eaaf 100644 (file)
@@ -111,7 +111,7 @@ raw_field_value(struct tep_event *event, const char *name, void *data)
 
 unsigned long long read_size(struct tep_event *event, void *ptr, int size)
 {
-       return tep_read_number(event->pevent, ptr, size);
+       return tep_read_number(event->tep, ptr, size);
 }
 
 void event_format__fprintf(struct tep_event *event,
index efe2f58cff4e4e7284171fa43244bfa9a91cc474..48d53d8e3e16893d2705d527835d98a4eeac393c 100644 (file)
@@ -442,7 +442,7 @@ ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe)
 
        tep_set_flag(pevent, TEP_NSEC_OUTPUT);
        tep_set_file_bigendian(pevent, file_bigendian);
-       tep_set_host_bigendian(pevent, host_bigendian);
+       tep_set_local_bigendian(pevent, host_bigendian);
 
        if (do_read(buf, 1) < 0)
                goto out;
index cbe0dd758e3ad2d9f36a6b15c2c7847b683a534a..01b9d89bf5bfc928d46c14a145ff75531021b458 100644 (file)
@@ -40,7 +40,7 @@ int trace_event__init(struct trace_event *t)
 
 static int trace_event__init2(void)
 {
-       int be = tep_host_bigendian();
+       int be = tep_is_bigendian();
        struct tep_handle *pevent;
 
        if (trace_event__init(&tevent))
@@ -49,7 +49,7 @@ static int trace_event__init2(void)
        pevent = tevent.pevent;
        tep_set_flag(pevent, TEP_NSEC_OUTPUT);
        tep_set_file_bigendian(pevent, be);
-       tep_set_host_bigendian(pevent, be);
+       tep_set_local_bigendian(pevent, be);
        tevent_initialized = true;
        return 0;
 }
index 9327c0ddc3a59c6424d33b6278b9cf7e51155ce7..c7727be9719f4ea9b9524ddf1ff92daeebab8982 100644 (file)
@@ -44,6 +44,7 @@
 #include <cpuid.h>
 #include <linux/capability.h>
 #include <errno.h>
+#include <math.h>
 
 char *proc_stat = "/proc/stat";
 FILE *outf;
@@ -63,7 +64,6 @@ unsigned int dump_only;
 unsigned int do_snb_cstates;
 unsigned int do_knl_cstates;
 unsigned int do_slm_cstates;
-unsigned int do_cnl_cstates;
 unsigned int use_c1_residency_msr;
 unsigned int has_aperf;
 unsigned int has_epb;
@@ -141,9 +141,21 @@ unsigned int first_counter_read = 1;
 
 #define RAPL_CORES_ENERGY_STATUS       (1 << 9)
                                        /* 0x639 MSR_PP0_ENERGY_STATUS */
+#define RAPL_PER_CORE_ENERGY   (1 << 10)
+                                       /* Indicates that core energy collection is per-core,
+                                        * not per-package. */
+#define RAPL_AMD_F17H          (1 << 11)
+                                       /* 0xc0010299 MSR_RAPL_PWR_UNIT */
+                                       /* 0xc001029a MSR_CORE_ENERGY_STAT */
+                                       /* 0xc001029b MSR_PKG_ENERGY_STAT */
 #define RAPL_CORES (RAPL_CORES_ENERGY_STATUS | RAPL_CORES_POWER_LIMIT)
 #define        TJMAX_DEFAULT   100
 
+/* MSRs that are not yet in the kernel-provided header. */
+#define MSR_RAPL_PWR_UNIT      0xc0010299
+#define MSR_CORE_ENERGY_STAT   0xc001029a
+#define MSR_PKG_ENERGY_STAT    0xc001029b
+
 #define MAX(a, b) ((a) > (b) ? (a) : (b))
 
 /*
@@ -187,6 +199,7 @@ struct core_data {
        unsigned long long c7;
        unsigned long long mc6_us;      /* duplicate as per-core for now, even though per module */
        unsigned int core_temp_c;
+       unsigned int core_energy;       /* MSR_CORE_ENERGY_STAT */
        unsigned int core_id;
        unsigned long long counter[MAX_ADDED_COUNTERS];
 } *core_even, *core_odd;
@@ -273,6 +286,7 @@ struct system_summary {
 
 struct cpu_topology {
        int physical_package_id;
+       int die_id;
        int logical_cpu_id;
        int physical_node_id;
        int logical_node_id;    /* 0-based count within the package */
@@ -283,6 +297,7 @@ struct cpu_topology {
 
 struct topo_params {
        int num_packages;
+       int num_die;
        int num_cpus;
        int num_cores;
        int max_cpu_num;
@@ -314,9 +329,8 @@ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg
        int retval, pkg_no, core_no, thread_no, node_no;
 
        for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
-               for (core_no = 0; core_no < topo.cores_per_node; ++core_no) {
-                       for (node_no = 0; node_no < topo.nodes_per_pkg;
-                            node_no++) {
+               for (node_no = 0; node_no < topo.nodes_per_pkg; node_no++) {
+                       for (core_no = 0; core_no < topo.cores_per_node; ++core_no) {
                                for (thread_no = 0; thread_no <
                                        topo.threads_per_core; ++thread_no) {
                                        struct thread_data *t;
@@ -442,6 +456,7 @@ struct msr_counter bic[] = {
        { 0x0, "CPU" },
        { 0x0, "APIC" },
        { 0x0, "X2APIC" },
+       { 0x0, "Die" },
 };
 
 #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
@@ -495,6 +510,7 @@ struct msr_counter bic[] = {
 #define        BIC_CPU         (1ULL << 47)
 #define        BIC_APIC        (1ULL << 48)
 #define        BIC_X2APIC      (1ULL << 49)
+#define        BIC_Die         (1ULL << 50)
 
 #define BIC_DISABLED_BY_DEFAULT        (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC)
 
@@ -621,6 +637,8 @@ void print_header(char *delim)
                outp += sprintf(outp, "%sTime_Of_Day_Seconds", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Package))
                outp += sprintf(outp, "%sPackage", (printed++ ? delim : ""));
+       if (DO_BIC(BIC_Die))
+               outp += sprintf(outp, "%sDie", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Node))
                outp += sprintf(outp, "%sNode", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Core))
@@ -667,7 +685,7 @@ void print_header(char *delim)
 
        if (DO_BIC(BIC_CPU_c1))
                outp += sprintf(outp, "%sCPU%%c1", (printed++ ? delim : ""));
-       if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates)
+       if (DO_BIC(BIC_CPU_c3))
                outp += sprintf(outp, "%sCPU%%c3", (printed++ ? delim : ""));
        if (DO_BIC(BIC_CPU_c6))
                outp += sprintf(outp, "%sCPU%%c6", (printed++ ? delim : ""));
@@ -680,6 +698,14 @@ void print_header(char *delim)
        if (DO_BIC(BIC_CoreTmp))
                outp += sprintf(outp, "%sCoreTmp", (printed++ ? delim : ""));
 
+       if (do_rapl && !rapl_joules) {
+               if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY))
+                       outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
+       } else if (do_rapl && rapl_joules) {
+               if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY))
+                       outp += sprintf(outp, "%sCor_J", (printed++ ? delim : ""));
+       }
+
        for (mp = sys.cp; mp; mp = mp->next) {
                if (mp->format == FORMAT_RAW) {
                        if (mp->width == 64)
@@ -734,7 +760,7 @@ void print_header(char *delim)
        if (do_rapl && !rapl_joules) {
                if (DO_BIC(BIC_PkgWatt))
                        outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : ""));
-               if (DO_BIC(BIC_CorWatt))
+               if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY))
                        outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
                if (DO_BIC(BIC_GFXWatt))
                        outp += sprintf(outp, "%sGFXWatt", (printed++ ? delim : ""));
@@ -747,7 +773,7 @@ void print_header(char *delim)
        } else if (do_rapl && rapl_joules) {
                if (DO_BIC(BIC_Pkg_J))
                        outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : ""));
-               if (DO_BIC(BIC_Cor_J))
+               if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY))
                        outp += sprintf(outp, "%sCor_J", (printed++ ? delim : ""));
                if (DO_BIC(BIC_GFX_J))
                        outp += sprintf(outp, "%sGFX_J", (printed++ ? delim : ""));
@@ -808,6 +834,7 @@ int dump_counters(struct thread_data *t, struct core_data *c,
                outp += sprintf(outp, "c6: %016llX\n", c->c6);
                outp += sprintf(outp, "c7: %016llX\n", c->c7);
                outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
+               outp += sprintf(outp, "Joules: %0X\n", c->core_energy);
 
                for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
                        outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n",
@@ -904,6 +931,8 @@ int format_counters(struct thread_data *t, struct core_data *c,
        if (t == &average.threads) {
                if (DO_BIC(BIC_Package))
                        outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+               if (DO_BIC(BIC_Die))
+                       outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
                if (DO_BIC(BIC_Node))
                        outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
                if (DO_BIC(BIC_Core))
@@ -921,6 +950,12 @@ int format_counters(struct thread_data *t, struct core_data *c,
                        else
                                outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
                }
+               if (DO_BIC(BIC_Die)) {
+                       if (c)
+                               outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), cpus[t->cpu_id].die_id);
+                       else
+                               outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+               }
                if (DO_BIC(BIC_Node)) {
                        if (t)
                                outp += sprintf(outp, "%s%d",
@@ -1003,7 +1038,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
                goto done;
 
-       if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates)
+       if (DO_BIC(BIC_CPU_c3))
                outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc);
        if (DO_BIC(BIC_CPU_c6))
                outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc);
@@ -1033,6 +1068,20 @@ int format_counters(struct thread_data *t, struct core_data *c,
                }
        }
 
+       /*
+        * If the measurement interval exceeds the minimum RAPL Joule Counter range,
+        * indicate that results are suspect by printing "**" in the fraction place.
+        */
+       if (interval_float < rapl_joule_counter_range)
+               fmt8 = "%s%.2f";
+       else
+               fmt8 = "%6.0f**";
+
+       if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY))
+               outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units / interval_float);
+       if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY))
+               outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units);
+
        /* print per-package data only for 1st core in package */
        if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
                goto done;
@@ -1085,18 +1134,9 @@ int format_counters(struct thread_data *t, struct core_data *c,
        if (DO_BIC(BIC_SYS_LPI))
                outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float);
 
-       /*
-        * If measurement interval exceeds minimum RAPL Joule Counter range,
-        * indicate that results are suspect by printing "**" in fraction place.
-        */
-       if (interval_float < rapl_joule_counter_range)
-               fmt8 = "%s%.2f";
-       else
-               fmt8 = "%6.0f**";
-
        if (DO_BIC(BIC_PkgWatt))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float);
-       if (DO_BIC(BIC_CorWatt))
+       if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float);
        if (DO_BIC(BIC_GFXWatt))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float);
@@ -1104,7 +1144,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float);
        if (DO_BIC(BIC_Pkg_J))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units);
-       if (DO_BIC(BIC_Cor_J))
+       if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units);
        if (DO_BIC(BIC_GFX_J))
                outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units);
@@ -1249,6 +1289,8 @@ delta_core(struct core_data *new, struct core_data *old)
        old->core_temp_c = new->core_temp_c;
        old->mc6_us = new->mc6_us - old->mc6_us;
 
+       DELTA_WRAP32(new->core_energy, old->core_energy);
+
        for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
                if (mp->format == FORMAT_RAW)
                        old->counter[i] = new->counter[i];
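
core_energy is a free-running 32-bit counter sampled from MSR_CORE_ENERGY_STAT, so consecutive readings must be differenced with wraparound in mind; that is what DELTA_WRAP32 (defined elsewhere in turbostat) takes care of. The underlying idea, as a sketch rather than turbostat's exact macro:

        /* Modulo-2^32 subtraction gives the correct delta even when the
         * counter wrapped between the two samples. */
        static unsigned int delta_wrap32(unsigned int new, unsigned int old)
        {
                return new - old;
        }
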
@@ -1391,6 +1433,7 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
        c->c7 = 0;
        c->mc6_us = 0;
        c->core_temp_c = 0;
+       c->core_energy = 0;
 
        p->pkg_wtd_core_c0 = 0;
        p->pkg_any_core_c0 = 0;
@@ -1473,6 +1516,8 @@ int sum_counters(struct thread_data *t, struct core_data *c,
 
        average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
 
+       average.cores.core_energy += c->core_energy;
+
        for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
                if (mp->format == FORMAT_RAW)
                        continue;
@@ -1818,7 +1863,7 @@ retry:
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
                goto done;
 
-       if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) {
+       if (DO_BIC(BIC_CPU_c3)) {
                if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
                        return -6;
        }
@@ -1845,6 +1890,12 @@ retry:
                c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
        }
 
+       if (do_rapl & RAPL_AMD_F17H) {
+               if (get_msr(cpu, MSR_CORE_ENERGY_STAT, &msr))
+                       return -14;
+               c->core_energy = msr & 0xFFFFFFFF;
+       }
+
        for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
                if (get_mp(cpu, mp, &c->counter[i]))
                        return -10;
@@ -1934,6 +1985,11 @@ retry:
                        return -16;
                p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
        }
+       if (do_rapl & RAPL_AMD_F17H) {
+               if (get_msr(cpu, MSR_PKG_ENERGY_STAT, &msr))
+                       return -13;
+               p->energy_pkg = msr & 0xFFFFFFFF;
+       }
        if (DO_BIC(BIC_PkgTmp)) {
                if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
                        return -17;
@@ -2456,6 +2512,8 @@ void free_all_buffers(void)
 
 /*
  * Parse a file containing a single int.
+ * Return 0 if the file cannot be opened.
+ * Exit if the file can be opened but cannot be parsed.
  */
 int parse_int_file(const char *fmt, ...)
 {
@@ -2467,7 +2525,9 @@ int parse_int_file(const char *fmt, ...)
        va_start(args, fmt);
        vsnprintf(path, sizeof(path), fmt, args);
        va_end(args);
-       filep = fopen_or_die(path, "r");
+       filep = fopen(path, "r");
+       if (!filep)
+               return 0;
        if (fscanf(filep, "%d", &value) != 1)
                err(1, "%s: failed to parse number from file", path);
        fclose(filep);
@@ -2488,6 +2548,11 @@ int get_physical_package_id(int cpu)
        return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
 }
 
+int get_die_id(int cpu)
+{
+       return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/die_id", cpu);
+}
+
 int get_core_id(int cpu)
 {
        return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
@@ -2578,7 +2643,8 @@ int get_thread_siblings(struct cpu_topology *thiscpu)
        filep = fopen_or_die(path, "r");
        do {
                offset -= BITMASK_SIZE;
-               fscanf(filep, "%lx%c", &map, &character);
+               if (fscanf(filep, "%lx%c", &map, &character) != 2)
+                       err(1, "%s: failed to parse file", path);
                for (shift = 0; shift < BITMASK_SIZE; shift++) {
                        if ((map >> shift) & 0x1) {
                                so = shift + offset;
@@ -2855,8 +2921,11 @@ int snapshot_cpu_lpi_us(void)
        fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", "r");
 
        retval = fscanf(fp, "%lld", &cpuidle_cur_cpu_lpi_us);
-       if (retval != 1)
-               err(1, "CPU LPI");
+       if (retval != 1) {
+               fprintf(stderr, "Disabling Low Power Idle CPU output\n");
+               BIC_NOT_PRESENT(BIC_CPU_LPI);
+               return -1;
+       }
 
        fclose(fp);
 
@@ -2878,9 +2947,11 @@ int snapshot_sys_lpi_us(void)
        fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r");
 
        retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us);
-       if (retval != 1)
-               err(1, "SYS LPI");
-
+       if (retval != 1) {
+               fprintf(stderr, "Disabling Low Power Idle System output\n");
+               BIC_NOT_PRESENT(BIC_SYS_LPI);
+               return -1;
+       }
        fclose(fp);
 
        return 0;
@@ -3410,14 +3481,14 @@ dump_sysfs_cstate_config(void)
                input = fopen(path, "r");
                if (input == NULL)
                        continue;
-               fgets(name_buf, sizeof(name_buf), input);
+               if (!fgets(name_buf, sizeof(name_buf), input))
+                       err(1, "%s: failed to read file", path);
 
                 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
                sp = strchr(name_buf, '-');
                if (!sp)
                        sp = strchrnul(name_buf, '\n');
                *sp = '\0';
-
                fclose(input);
 
                sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc",
@@ -3425,7 +3496,8 @@ dump_sysfs_cstate_config(void)
                input = fopen(path, "r");
                if (input == NULL)
                        continue;
-               fgets(desc, sizeof(desc), input);
+               if (!fgets(desc, sizeof(desc), input))
+                       err(1, "%s: failed to read file", path);
 
                fprintf(outf, "cpu%d: %s: %s", base_cpu, name_buf, desc);
                fclose(input);
@@ -3444,20 +3516,22 @@ dump_sysfs_pstate_config(void)
                        base_cpu);
        input = fopen(path, "r");
        if (input == NULL) {
-               fprintf(stderr, "NSFOD %s\n", path);
+               fprintf(outf, "NSFOD %s\n", path);
                return;
        }
-       fgets(driver_buf, sizeof(driver_buf), input);
+       if (!fgets(driver_buf, sizeof(driver_buf), input))
+               err(1, "%s: failed to read file", path);
        fclose(input);
 
        sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor",
                        base_cpu);
        input = fopen(path, "r");
        if (input == NULL) {
-               fprintf(stderr, "NSFOD %s\n", path);
+               fprintf(outf, "NSFOD %s\n", path);
                return;
        }
-       fgets(governor_buf, sizeof(governor_buf), input);
+       if (!fgets(governor_buf, sizeof(governor_buf), input))
+               err(1, "%s: failed to read file", path);
        fclose(input);
 
        fprintf(outf, "cpu%d: cpufreq driver: %s", base_cpu, driver_buf);
@@ -3466,7 +3540,8 @@ dump_sysfs_pstate_config(void)
        sprintf(path, "/sys/devices/system/cpu/cpufreq/boost");
        input = fopen(path, "r");
        if (input != NULL) {
-               fscanf(input, "%d", &turbo);
+               if (fscanf(input, "%d", &turbo) != 1)
+                       err(1, "%s: failed to parse number from file", path);
                fprintf(outf, "cpufreq boost: %d\n", turbo);
                fclose(input);
        }
@@ -3474,7 +3549,8 @@ dump_sysfs_pstate_config(void)
        sprintf(path, "/sys/devices/system/cpu/intel_pstate/no_turbo");
        input = fopen(path, "r");
        if (input != NULL) {
-               fscanf(input, "%d", &turbo);
+               if (fscanf(input, "%d", &turbo) != 1)
+                       err(1, "%s: failed to parse number from file", path);
                fprintf(outf, "cpufreq intel_pstate no_turbo: %d\n", turbo);
                fclose(input);
        }
@@ -3718,7 +3794,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data
 #define        RAPL_POWER_GRANULARITY  0x7FFF  /* 15 bit power granularity */
 #define        RAPL_TIME_GRANULARITY   0x3F /* 6 bit time granularity */
 
-double get_tdp(unsigned int model)
+double get_tdp_intel(unsigned int model)
 {
        unsigned long long msr;
 
@@ -3735,6 +3811,16 @@ double get_tdp(unsigned int model)
        }
 }
 
+double get_tdp_amd(unsigned int family)
+{
+       switch (family) {
+       case 0x17:
+       default:
+               /* This is the max stock TDP of HEDT/Server Fam17h chips */
+               return 250.0;
+       }
+}
+
 /*
  * rapl_dram_energy_units_probe()
  * Energy units are either hard-coded, or come from RAPL Energy Unit MSR.
@@ -3754,21 +3840,12 @@ rapl_dram_energy_units_probe(int  model, double rapl_energy_units)
        }
 }
 
-
-/*
- * rapl_probe()
- *
- * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
- */
-void rapl_probe(unsigned int family, unsigned int model)
+void rapl_probe_intel(unsigned int family, unsigned int model)
 {
        unsigned long long msr;
        unsigned int time_unit;
        double tdp;
 
-       if (!genuine_intel)
-               return;
-
        if (family != 6)
                return;
 
@@ -3892,13 +3969,69 @@ void rapl_probe(unsigned int family, unsigned int model)
 
        rapl_time_units = 1.0 / (1 << (time_unit));
 
-       tdp = get_tdp(model);
+       tdp = get_tdp_intel(model);
 
        rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
        if (!quiet)
                fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
+}
 
-       return;
+void rapl_probe_amd(unsigned int family, unsigned int model)
+{
+       unsigned long long msr;
+       unsigned int eax, ebx, ecx, edx;
+       unsigned int has_rapl = 0;
+       double tdp;
+
+       if (max_extended_level >= 0x80000007) {
+               __cpuid(0x80000007, eax, ebx, ecx, edx);
+               /* RAPL (Fam 17h) */
+               has_rapl = edx & (1 << 14);
+       }
+
+       if (!has_rapl)
+               return;
+
+       switch (family) {
+       case 0x17: /* Zen, Zen+ */
+               do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY;
+               if (rapl_joules) {
+                       BIC_PRESENT(BIC_Pkg_J);
+                       BIC_PRESENT(BIC_Cor_J);
+               } else {
+                       BIC_PRESENT(BIC_PkgWatt);
+                       BIC_PRESENT(BIC_CorWatt);
+               }
+               break;
+       default:
+               return;
+       }
+
+       if (get_msr(base_cpu, MSR_RAPL_PWR_UNIT, &msr))
+               return;
+
+       rapl_time_units = ldexp(1.0, -(msr >> 16 & 0xf));
+       rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f));
+       rapl_power_units = ldexp(1.0, -(msr & 0xf));
+
+       tdp = get_tdp_amd(model);
+
+       rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
+       if (!quiet)
+               fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
+}
+
+/*
+ * rapl_probe()
+ *
+ * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
+ */
+void rapl_probe(unsigned int family, unsigned int model)
+{
+       if (genuine_intel)
+               rapl_probe_intel(family, model);
+       if (authentic_amd)
+               rapl_probe_amd(family, model);
 }
 
 void perf_limit_reasons_probe(unsigned int family, unsigned int model)
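
On AMD Fam17h, MSR_RAPL_PWR_UNIT packs three exponents (time in bits 19:16, energy in bits 12:8, power in bits 3:0), each applied as a negative power of two via ldexp(). A worked example with a hypothetical register value:

        unsigned long long msr = 0x000A1003ULL;                 /* hypothetical contents */
        double time   = ldexp(1.0, -(int)(msr >> 16 & 0xf));    /* 2^-10 s */
        double energy = ldexp(1.0, -(int)(msr >> 8 & 0x1f));    /* 2^-16 J, ~15.3 uJ */
        double power  = ldexp(1.0, -(int)(msr & 0xf));          /* 2^-3 W = 0.125 W */

With that energy unit and the assumed 250 W TDP, the 32-bit joule counter range is 0xFFFFFFFF * 2^-16 / 250, roughly 262 seconds, which is the bound the "**" suspect-result marker above is checked against.
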
@@ -4003,6 +4136,7 @@ void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
 int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 {
        unsigned long long msr;
+       const char *msr_name;
        int cpu;
 
        if (!do_rapl)
@@ -4018,10 +4152,17 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                return -1;
        }
 
-       if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
-               return -1;
+       if (do_rapl & RAPL_AMD_F17H) {
+               msr_name = "MSR_RAPL_PWR_UNIT";
+               if (get_msr(cpu, MSR_RAPL_PWR_UNIT, &msr))
+                       return -1;
+       } else {
+               msr_name = "MSR_RAPL_POWER_UNIT";
+               if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
+                       return -1;
+       }
 
-       fprintf(outf, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr,
+       fprintf(outf, "cpu%d: %s: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr_name, msr,
                rapl_power_units, rapl_energy_units, rapl_time_units);
 
        if (do_rapl & RAPL_PKG_POWER_INFO) {
@@ -4451,6 +4592,9 @@ unsigned int intel_model_duplicates(unsigned int model)
        case INTEL_FAM6_KABYLAKE_MOBILE:
        case INTEL_FAM6_KABYLAKE_DESKTOP:
                return INTEL_FAM6_SKYLAKE_MOBILE;
+
+       case INTEL_FAM6_ICELAKE_MOBILE:
+               return INTEL_FAM6_CANNONLAKE_MOBILE;
        }
        return model;
 }
@@ -4702,7 +4846,9 @@ void process_cpuid()
        }
        do_slm_cstates = is_slm(family, model);
        do_knl_cstates  = is_knl(family, model);
-       do_cnl_cstates = is_cnl(family, model);
+
+       if (do_slm_cstates || do_knl_cstates || is_cnl(family, model))
+               BIC_NOT_PRESENT(BIC_CPU_c3);
 
        if (!quiet)
                decode_misc_pwr_mgmt_msr();
@@ -4769,6 +4915,7 @@ void topology_probe()
        int i;
        int max_core_id = 0;
        int max_package_id = 0;
+       int max_die_id = 0;
        int max_siblings = 0;
 
        /* Initialize num_cpus, max_cpu_num */
@@ -4835,6 +4982,11 @@ void topology_probe()
                if (cpus[i].physical_package_id > max_package_id)
                        max_package_id = cpus[i].physical_package_id;
 
+               /* get die information */
+               cpus[i].die_id = get_die_id(i);
+               if (cpus[i].die_id > max_die_id)
+                       max_die_id = cpus[i].die_id;
+
                /* get numa node information */
                cpus[i].physical_node_id = get_physical_node_id(&cpus[i]);
                if (cpus[i].physical_node_id > topo.max_node_num)
@@ -4860,6 +5012,13 @@ void topology_probe()
        if (!summary_only && topo.cores_per_node > 1)
                BIC_PRESENT(BIC_Core);
 
+       topo.num_die = max_die_id + 1;
+       if (debug > 1)
+               fprintf(outf, "max_die_id %d, sizing for %d die\n",
+                               max_die_id, topo.num_die);
+       if (!summary_only && topo.num_die > 1)
+               BIC_PRESENT(BIC_Die);
+
        topo.num_packages = max_package_id + 1;
        if (debug > 1)
                fprintf(outf, "max_package_id %d, sizing for %d packages\n",
@@ -4884,8 +5043,8 @@ void topology_probe()
                if (cpu_is_not_present(i))
                        continue;
                fprintf(outf,
-                       "cpu %d pkg %d node %d lnode %d core %d thread %d\n",
-                       i, cpus[i].physical_package_id,
+                       "cpu %d pkg %d die %d node %d lnode %d core %d thread %d\n",
+                       i, cpus[i].physical_package_id, cpus[i].die_id,
                        cpus[i].physical_node_id,
                        cpus[i].logical_node_id,
                        cpus[i].physical_core_id,
@@ -5077,6 +5236,9 @@ int fork_it(char **argv)
                signal(SIGQUIT, SIG_IGN);
                if (waitpid(child_pid, &status, 0) == -1)
                        err(status, "waitpid");
+
+               if (WIFEXITED(status))
+                       status = WEXITSTATUS(status);
        }
        /*
         * n.b. fork_it() does not check for errors from for_all_cpus()
@@ -5119,7 +5281,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(outf, "turbostat version 18.07.27"
+       fprintf(outf, "turbostat version 19.03.20"
                " - Len Brown <lenb@kernel.org>\n");
 }
 
@@ -5316,7 +5478,8 @@ void probe_sysfs(void)
                input = fopen(path, "r");
                if (input == NULL)
                        continue;
-               fgets(name_buf, sizeof(name_buf), input);
+               if (!fgets(name_buf, sizeof(name_buf), input))
+                       err(1, "%s: failed to read file", path);
 
                 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
                sp = strchr(name_buf, '-');
@@ -5343,7 +5506,8 @@ void probe_sysfs(void)
                input = fopen(path, "r");
                if (input == NULL)
                        continue;
-               fgets(name_buf, sizeof(name_buf), input);
+               if (!fgets(name_buf, sizeof(name_buf), input))
+                       err(1, "%s: failed to read file", path);
                 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
                sp = strchr(name_buf, '-');
                if (!sp)
index b579f962451d6464035c6649ac714998c05a225f..85ffdcfa596b5011b93abf3c65e90cd33cceb61f 100644 (file)
@@ -146,6 +146,7 @@ static int dimm_fail_cmd_code[ARRAY_SIZE(handle)];
 struct nfit_test_sec {
        u8 state;
        u8 ext_state;
+       u8 old_state;
        u8 passphrase[32];
        u8 master_passphrase[32];
        u64 overwrite_end_time;
@@ -225,6 +226,8 @@ static struct workqueue_struct *nfit_wq;
 
 static struct gen_pool *nfit_pool;
 
+static const char zero_key[NVDIMM_PASSPHRASE_LEN];
+
 static struct nfit_test *to_nfit_test(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -1059,8 +1062,7 @@ static int nd_intel_test_cmd_secure_erase(struct nfit_test *t,
        struct device *dev = &t->pdev.dev;
        struct nfit_test_sec *sec = &dimm_sec_info[dimm];
 
-       if (!(sec->state & ND_INTEL_SEC_STATE_ENABLED) ||
-                       (sec->state & ND_INTEL_SEC_STATE_FROZEN)) {
+       if (sec->state & ND_INTEL_SEC_STATE_FROZEN) {
                nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE;
                dev_dbg(dev, "secure erase: wrong security state\n");
        } else if (memcmp(nd_cmd->passphrase, sec->passphrase,
@@ -1068,6 +1070,12 @@ static int nd_intel_test_cmd_secure_erase(struct nfit_test *t,
                nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS;
                dev_dbg(dev, "secure erase: wrong passphrase\n");
        } else {
+               if (!(sec->state & ND_INTEL_SEC_STATE_ENABLED)
+                               && (memcmp(nd_cmd->passphrase, zero_key,
+                                       ND_INTEL_PASSPHRASE_SIZE) != 0)) {
+                       dev_dbg(dev, "invalid zero key\n");
+                       return 0;
+               }
                memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
                memset(sec->master_passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
                sec->state = 0;
@@ -1093,7 +1101,7 @@ static int nd_intel_test_cmd_overwrite(struct nfit_test *t,
                return 0;
        }
 
-       memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
+       sec->old_state = sec->state;
        sec->state = ND_INTEL_SEC_STATE_OVERWRITE;
        dev_dbg(dev, "overwrite progressing.\n");
        sec->overwrite_end_time = get_jiffies_64() + 5 * HZ;
@@ -1115,7 +1123,8 @@ static int nd_intel_test_cmd_query_overwrite(struct nfit_test *t,
 
        if (time_is_before_jiffies64(sec->overwrite_end_time)) {
                sec->overwrite_end_time = 0;
-               sec->state = 0;
+               sec->state = sec->old_state;
+               sec->old_state = 0;
                sec->ext_state = ND_INTEL_SEC_ESTATE_ENABLED;
                dev_dbg(dev, "overwrite is complete\n");
        } else
index c9433a496d548daf9417a1f19af71753f93547c0..c81fc350f7ad46ad60d53ac3dd8121059020f9a6 100644 (file)
@@ -180,6 +180,8 @@ static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) =
        (void *) BPF_FUNC_sk_fullsock;
 static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) =
        (void *) BPF_FUNC_tcp_sock;
+static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) =
+       (void *) BPF_FUNC_get_listener_sock;
 static int (*bpf_skb_ecn_set_ce)(void *ctx) =
        (void *) BPF_FUNC_skb_ecn_set_ce;
 
index bcbd928c96aba4ab3a2a13984b1987f1497a412d..fc818bc1d7294454093a949b3e5f394ca9879982 100644 (file)
@@ -39,6 +39,58 @@ static struct bpf_flow_keys pkt_v6_flow_keys = {
        .n_proto = __bpf_constant_htons(ETH_P_IPV6),
 };
 
+#define VLAN_HLEN      4
+
+static struct {
+       struct ethhdr eth;
+       __u16 vlan_tci;
+       __u16 vlan_proto;
+       struct iphdr iph;
+       struct tcphdr tcp;
+} __packed pkt_vlan_v4 = {
+       .eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
+       .vlan_proto = __bpf_constant_htons(ETH_P_IP),
+       .iph.ihl = 5,
+       .iph.protocol = IPPROTO_TCP,
+       .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
+       .tcp.urg_ptr = 123,
+       .tcp.doff = 5,
+};
+
+static struct bpf_flow_keys pkt_vlan_v4_flow_keys = {
+       .nhoff = VLAN_HLEN,
+       .thoff = VLAN_HLEN + sizeof(struct iphdr),
+       .addr_proto = ETH_P_IP,
+       .ip_proto = IPPROTO_TCP,
+       .n_proto = __bpf_constant_htons(ETH_P_IP),
+};
+
+static struct {
+       struct ethhdr eth;
+       __u16 vlan_tci;
+       __u16 vlan_proto;
+       __u16 vlan_tci2;
+       __u16 vlan_proto2;
+       struct ipv6hdr iph;
+       struct tcphdr tcp;
+} __packed pkt_vlan_v6 = {
+       .eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
+       .vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
+       .vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
+       .iph.nexthdr = IPPROTO_TCP,
+       .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
+       .tcp.urg_ptr = 123,
+       .tcp.doff = 5,
+};
+
+static struct bpf_flow_keys pkt_vlan_v6_flow_keys = {
+       .nhoff = VLAN_HLEN * 2,
+       .thoff = VLAN_HLEN * 2 + sizeof(struct ipv6hdr),
+       .addr_proto = ETH_P_IPV6,
+       .ip_proto = IPPROTO_TCP,
+       .n_proto = __bpf_constant_htons(ETH_P_IPV6),
+};
+
 void test_flow_dissector(void)
 {
        struct bpf_flow_keys flow_keys;
@@ -68,5 +120,21 @@ void test_flow_dissector(void)
              err, errno, retval, duration, size, sizeof(flow_keys));
        CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys);
 
+       err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v4, sizeof(pkt_vlan_v4),
+                               &flow_keys, &size, &retval, &duration);
+       CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv4",
+             "err %d errno %d retval %d duration %d size %u/%lu\n",
+             err, errno, retval, duration, size, sizeof(flow_keys));
+       CHECK_FLOW_KEYS("vlan_ipv4_flow_keys", flow_keys,
+                       pkt_vlan_v4_flow_keys);
+
+       err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v6, sizeof(pkt_vlan_v6),
+                               &flow_keys, &size, &retval, &duration);
+       CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv6",
+             "err %d errno %d retval %d duration %d size %u/%lu\n",
+             err, errno, retval, duration, size, sizeof(flow_keys));
+       CHECK_FLOW_KEYS("vlan_ipv6_flow_keys", flow_keys,
+                       pkt_vlan_v6_flow_keys);
+
        bpf_object__close(obj);
 }
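
The expected flow keys above encode where each header lands after the dissector consumes the tags: one 802.1Q tag shifts the network header by VLAN_HLEN (4 bytes), an 802.1AD + 802.1Q double tag by twice that. A sanity-check sketch of the arithmetic:

        assert(pkt_vlan_v4_flow_keys.nhoff == VLAN_HLEN);                               /* 4  */
        assert(pkt_vlan_v4_flow_keys.thoff == VLAN_HLEN + sizeof(struct iphdr));        /* 24 */
        assert(pkt_vlan_v6_flow_keys.nhoff == 2 * VLAN_HLEN);                           /* 8  */
        assert(pkt_vlan_v6_flow_keys.thoff == 2 * VLAN_HLEN + sizeof(struct ipv6hdr));  /* 48 */

This matches the BPF program change further below, which now advances keys->nhoff alongside keys->thoff for every VLAN header it parses.
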
index 90f8a206340ab4daa61c401144d45cbec6b15a63..ee99368c595ca0b0768ad7938212bc80977bacf1 100644 (file)
@@ -37,7 +37,7 @@ void test_map_lock(void)
        const char *file = "./test_map_lock.o";
        int prog_fd, map_fd[2], vars[17] = {};
        pthread_t thread_id[6];
-       struct bpf_object *obj;
+       struct bpf_object *obj = NULL;
        int err = 0, key = 0, i;
        void *ret;
 
index 9a573a9675d74beee07fc6025f1504682c394c21..114ebe6a438e562d864971a5a5d174b1e0936f8a 100644 (file)
@@ -5,7 +5,7 @@ void test_spinlock(void)
 {
        const char *file = "./test_spin_lock.o";
        pthread_t thread_id[4];
-       struct bpf_object *obj;
+       struct bpf_object *obj = NULL;
        int prog_fd;
        int err = 0, i;
        void *ret;
index 284660f5aa9533aaee034e13242f5ea098729676..75b17cada53937e5e237e00049a1bc22176cb83c 100644 (file)
@@ -92,7 +92,6 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
 {
        struct bpf_flow_keys *keys = skb->flow_keys;
 
-       keys->n_proto = proto;
        switch (proto) {
        case bpf_htons(ETH_P_IP):
                bpf_tail_call(skb, &jmp_table, IP);
@@ -119,10 +118,9 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
 SEC("flow_dissector")
 int _dissect(struct __sk_buff *skb)
 {
-       if (!skb->vlan_present)
-               return parse_eth_proto(skb, skb->protocol);
-       else
-               return parse_eth_proto(skb, skb->vlan_proto);
+       struct bpf_flow_keys *keys = skb->flow_keys;
+
+       return parse_eth_proto(skb, keys->n_proto);
 }
 
 /* Parses on IPPROTO_* */
@@ -336,15 +334,9 @@ PROG(VLAN)(struct __sk_buff *skb)
 {
        struct bpf_flow_keys *keys = skb->flow_keys;
        struct vlan_hdr *vlan, _vlan;
-       __be16 proto;
-
-       /* Peek back to see if single or double-tagging */
-       if (bpf_skb_load_bytes(skb, keys->thoff - sizeof(proto), &proto,
-                              sizeof(proto)))
-               return BPF_DROP;
 
        /* Account for double-tagging */
-       if (proto == bpf_htons(ETH_P_8021AD)) {
+       if (keys->n_proto == bpf_htons(ETH_P_8021AD)) {
                vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
                if (!vlan)
                        return BPF_DROP;
@@ -352,6 +344,7 @@ PROG(VLAN)(struct __sk_buff *skb)
                if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
                        return BPF_DROP;
 
+               keys->nhoff += sizeof(*vlan);
                keys->thoff += sizeof(*vlan);
        }
 
@@ -359,12 +352,14 @@ PROG(VLAN)(struct __sk_buff *skb)
        if (!vlan)
                return BPF_DROP;
 
+       keys->nhoff += sizeof(*vlan);
        keys->thoff += sizeof(*vlan);
        /* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/
        if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
            vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))
                return BPF_DROP;
 
+       keys->n_proto = vlan->h_vlan_encapsulated_proto;
        return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto);
 }
 
index de1a43e8f61070220c511b9aae698e4de7fb0659..37328f1485384b756c3120f428a3348206a9eb47 100644 (file)
@@ -8,38 +8,51 @@
 #include "bpf_helpers.h"
 #include "bpf_endian.h"
 
-enum bpf_array_idx {
-       SRV_IDX,
-       CLI_IDX,
-       __NR_BPF_ARRAY_IDX,
+enum bpf_addr_array_idx {
+       ADDR_SRV_IDX,
+       ADDR_CLI_IDX,
+       __NR_BPF_ADDR_ARRAY_IDX,
+};
+
+enum bpf_result_array_idx {
+       EGRESS_SRV_IDX,
+       EGRESS_CLI_IDX,
+       INGRESS_LISTEN_IDX,
+       __NR_BPF_RESULT_ARRAY_IDX,
+};
+
+enum bpf_linum_array_idx {
+       EGRESS_LINUM_IDX,
+       INGRESS_LINUM_IDX,
+       __NR_BPF_LINUM_ARRAY_IDX,
 };
 
 struct bpf_map_def SEC("maps") addr_map = {
        .type = BPF_MAP_TYPE_ARRAY,
        .key_size = sizeof(__u32),
        .value_size = sizeof(struct sockaddr_in6),
-       .max_entries = __NR_BPF_ARRAY_IDX,
+       .max_entries = __NR_BPF_ADDR_ARRAY_IDX,
 };
 
 struct bpf_map_def SEC("maps") sock_result_map = {
        .type = BPF_MAP_TYPE_ARRAY,
        .key_size = sizeof(__u32),
        .value_size = sizeof(struct bpf_sock),
-       .max_entries = __NR_BPF_ARRAY_IDX,
+       .max_entries = __NR_BPF_RESULT_ARRAY_IDX,
 };
 
 struct bpf_map_def SEC("maps") tcp_sock_result_map = {
        .type = BPF_MAP_TYPE_ARRAY,
        .key_size = sizeof(__u32),
        .value_size = sizeof(struct bpf_tcp_sock),
-       .max_entries = __NR_BPF_ARRAY_IDX,
+       .max_entries = __NR_BPF_RESULT_ARRAY_IDX,
 };
 
 struct bpf_map_def SEC("maps") linum_map = {
        .type = BPF_MAP_TYPE_ARRAY,
        .key_size = sizeof(__u32),
        .value_size = sizeof(__u32),
-       .max_entries = 1,
+       .max_entries = __NR_BPF_LINUM_ARRAY_IDX,
 };
 
 static bool is_loopback6(__u32 *a6)
@@ -100,18 +113,20 @@ static void tpcpy(struct bpf_tcp_sock *dst,
 
 #define RETURN {                                               \
        linum = __LINE__;                                       \
-       bpf_map_update_elem(&linum_map, &idx0, &linum, 0);      \
+       bpf_map_update_elem(&linum_map, &linum_idx, &linum, 0); \
        return 1;                                               \
 }
 
 SEC("cgroup_skb/egress")
-int read_sock_fields(struct __sk_buff *skb)
+int egress_read_sock_fields(struct __sk_buff *skb)
 {
-       __u32 srv_idx = SRV_IDX, cli_idx = CLI_IDX, idx;
+       __u32 srv_idx = ADDR_SRV_IDX, cli_idx = ADDR_CLI_IDX, result_idx;
        struct sockaddr_in6 *srv_sa6, *cli_sa6;
        struct bpf_tcp_sock *tp, *tp_ret;
        struct bpf_sock *sk, *sk_ret;
-       __u32 linum, idx0 = 0;
+       __u32 linum, linum_idx;
+
+       linum_idx = EGRESS_LINUM_IDX;
 
        sk = skb->sk;
        if (!sk || sk->state == 10)
@@ -132,14 +147,55 @@ int read_sock_fields(struct __sk_buff *skb)
                RETURN;
 
        if (sk->src_port == bpf_ntohs(srv_sa6->sin6_port))
-               idx = srv_idx;
+               result_idx = EGRESS_SRV_IDX;
        else if (sk->src_port == bpf_ntohs(cli_sa6->sin6_port))
-               idx = cli_idx;
+               result_idx = EGRESS_CLI_IDX;
        else
                RETURN;
 
-       sk_ret = bpf_map_lookup_elem(&sock_result_map, &idx);
-       tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &idx);
+       sk_ret = bpf_map_lookup_elem(&sock_result_map, &result_idx);
+       tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &result_idx);
+       if (!sk_ret || !tp_ret)
+               RETURN;
+
+       skcpy(sk_ret, sk);
+       tpcpy(tp_ret, tp);
+
+       RETURN;
+}
+
+SEC("cgroup_skb/ingress")
+int ingress_read_sock_fields(struct __sk_buff *skb)
+{
+       __u32 srv_idx = ADDR_SRV_IDX, result_idx = INGRESS_LISTEN_IDX;
+       struct bpf_tcp_sock *tp, *tp_ret;
+       struct bpf_sock *sk, *sk_ret;
+       struct sockaddr_in6 *srv_sa6;
+       __u32 linum, linum_idx;
+
+       linum_idx = INGRESS_LINUM_IDX;
+
+       sk = skb->sk;
+       if (!sk || sk->family != AF_INET6 || !is_loopback6(sk->src_ip6))
+               RETURN;
+
+       srv_sa6 = bpf_map_lookup_elem(&addr_map, &srv_idx);
+       if (!srv_sa6 || sk->src_port != bpf_ntohs(srv_sa6->sin6_port))
+               RETURN;
+
+       if (sk->state != 10 && sk->state != 12)
+               RETURN;
+
+       sk = bpf_get_listener_sock(sk);
+       if (!sk)
+               RETURN;
+
+       tp = bpf_tcp_sock(sk);
+       if (!tp)
+               RETURN;
+
+       sk_ret = bpf_map_lookup_elem(&sock_result_map, &result_idx);
+       tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &result_idx);
        if (!sk_ret || !tp_ret)
                RETURN;
 
index 38797aa627a732f31d333aaa6cf8020ceb3a211d..ec5794e4205bc04dabcaa20532cfe4cd2fcf6bbd 100644 (file)
@@ -5776,6 +5776,53 @@ const struct btf_dedup_test dedup_tests[] = {
                .dedup_table_size = 1, /* force hash collisions */
        },
 },
+{
+       .descr = "dedup: void equiv check",
+       /*
+        * // CU 1:
+        * struct s {
+        *      struct {} *x;
+        * };
+        * // CU 2:
+        * struct s {
+        *      int *x;
+        * };
+        */
+       .input = {
+               .raw_types = {
+                       /* CU 1 */
+                       BTF_STRUCT_ENC(0, 0, 1),                                /* [1] struct {}  */
+                       BTF_PTR_ENC(1),                                         /* [2] ptr -> [1] */
+                       BTF_STRUCT_ENC(NAME_NTH(1), 1, 8),                      /* [3] struct s   */
+                               BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+                       /* CU 2 */
+                       BTF_PTR_ENC(0),                                         /* [4] ptr -> void */
+                       BTF_STRUCT_ENC(NAME_NTH(1), 1, 8),                      /* [5] struct s   */
+                               BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
+                       BTF_END_RAW,
+               },
+               BTF_STR_SEC("\0s\0x"),
+       },
+       .expect = {
+               .raw_types = {
+                       /* CU 1 */
+                       BTF_STRUCT_ENC(0, 0, 1),                                /* [1] struct {}  */
+                       BTF_PTR_ENC(1),                                         /* [2] ptr -> [1] */
+                       BTF_STRUCT_ENC(NAME_NTH(1), 1, 8),                      /* [3] struct s   */
+                               BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+                       /* CU 2 */
+                       BTF_PTR_ENC(0),                                         /* [4] ptr -> void */
+                       BTF_STRUCT_ENC(NAME_NTH(1), 1, 8),                      /* [5] struct s   */
+                               BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
+                       BTF_END_RAW,
+               },
+               BTF_STR_SEC("\0s\0x"),
+       },
+       .opts = {
+               .dont_resolve_fwds = false,
+               .dedup_table_size = 1, /* force hash collisions */
+       },
+},
 {
        .descr = "dedup: all possible kinds (no duplicates)",
        .input = {
@@ -5874,6 +5921,50 @@ const struct btf_dedup_test dedup_tests[] = {
                .dont_resolve_fwds = false,
        },
 },
+{
+       .descr = "dedup: enum fwd resolution",
+       .input = {
+               .raw_types = {
+                       /* [1] fwd enum 'e1' before full enum */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4),
+                       /* [2] full enum 'e1' after fwd */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+                               BTF_ENUM_ENC(NAME_NTH(2), 123),
+                       /* [3] full enum 'e2' before fwd */
+                       BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+                               BTF_ENUM_ENC(NAME_NTH(4), 456),
+                       /* [4] fwd enum 'e2' after full enum */
+                       BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4),
+                       /* [5] incompatible fwd enum with different size */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1),
+                       /* [6] incompatible full enum with different value */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+                               BTF_ENUM_ENC(NAME_NTH(2), 321),
+                       BTF_END_RAW,
+               },
+               BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
+       },
+       .expect = {
+               .raw_types = {
+                       /* [1] full enum 'e1' */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+                               BTF_ENUM_ENC(NAME_NTH(2), 123),
+                       /* [2] full enum 'e2' */
+                       BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+                               BTF_ENUM_ENC(NAME_NTH(4), 456),
+                       /* [3] incompatible fwd enum with different size */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1),
+                       /* [4] incompatible full enum with different value */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+                               BTF_ENUM_ENC(NAME_NTH(2), 321),
+                       BTF_END_RAW,
+               },
+               BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
+       },
+       .opts = {
+               .dont_resolve_fwds = false,
+       },
+},
 
 };
 
index bc8943938bf53933577c9be075c2f35314708eb3..dcae7f664dce0827f5ddda4bc5f693d191def9b0 100644 (file)
 #include "cgroup_helpers.h"
 #include "bpf_rlimit.h"
 
-enum bpf_array_idx {
-       SRV_IDX,
-       CLI_IDX,
-       __NR_BPF_ARRAY_IDX,
+enum bpf_addr_array_idx {
+       ADDR_SRV_IDX,
+       ADDR_CLI_IDX,
+       __NR_BPF_ADDR_ARRAY_IDX,
+};
+
+enum bpf_result_array_idx {
+       EGRESS_SRV_IDX,
+       EGRESS_CLI_IDX,
+       INGRESS_LISTEN_IDX,
+       __NR_BPF_RESULT_ARRAY_IDX,
+};
+
+enum bpf_linum_array_idx {
+       EGRESS_LINUM_IDX,
+       INGRESS_LINUM_IDX,
+       __NR_BPF_LINUM_ARRAY_IDX,
 };
 
 #define CHECK(condition, tag, format...) ({                            \
@@ -41,8 +54,16 @@ static int linum_map_fd;
 static int addr_map_fd;
 static int tp_map_fd;
 static int sk_map_fd;
-static __u32 srv_idx = SRV_IDX;
-static __u32 cli_idx = CLI_IDX;
+
+static __u32 addr_srv_idx = ADDR_SRV_IDX;
+static __u32 addr_cli_idx = ADDR_CLI_IDX;
+
+static __u32 egress_srv_idx = EGRESS_SRV_IDX;
+static __u32 egress_cli_idx = EGRESS_CLI_IDX;
+static __u32 ingress_listen_idx = INGRESS_LISTEN_IDX;
+
+static __u32 egress_linum_idx = EGRESS_LINUM_IDX;
+static __u32 ingress_linum_idx = INGRESS_LINUM_IDX;
 
 static void init_loopback6(struct sockaddr_in6 *sa6)
 {
@@ -93,29 +114,46 @@ static void print_tp(const struct bpf_tcp_sock *tp)
 
 static void check_result(void)
 {
-       struct bpf_tcp_sock srv_tp, cli_tp;
-       struct bpf_sock srv_sk, cli_sk;
-       __u32 linum, idx0 = 0;
+       struct bpf_tcp_sock srv_tp, cli_tp, listen_tp;
+       struct bpf_sock srv_sk, cli_sk, listen_sk;
+       __u32 ingress_linum, egress_linum;
        int err;
 
-       err = bpf_map_lookup_elem(linum_map_fd, &idx0, &linum);
+       err = bpf_map_lookup_elem(linum_map_fd, &egress_linum_idx,
+                                 &egress_linum);
        CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
              "err:%d errno:%d", err, errno);
 
-       err = bpf_map_lookup_elem(sk_map_fd, &srv_idx, &srv_sk);
-       CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &srv_idx)",
+       err = bpf_map_lookup_elem(linum_map_fd, &ingress_linum_idx,
+                                 &ingress_linum);
+       CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
+             "err:%d errno:%d", err, errno);
+
+       err = bpf_map_lookup_elem(sk_map_fd, &egress_srv_idx, &srv_sk);
+       CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &egress_srv_idx)",
+             "err:%d errno:%d", err, errno);
+       err = bpf_map_lookup_elem(tp_map_fd, &egress_srv_idx, &srv_tp);
+       CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &egress_srv_idx)",
+             "err:%d errno:%d", err, errno);
+
+       err = bpf_map_lookup_elem(sk_map_fd, &egress_cli_idx, &cli_sk);
+       CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &egress_cli_idx)",
              "err:%d errno:%d", err, errno);
-       err = bpf_map_lookup_elem(tp_map_fd, &srv_idx, &srv_tp);
-       CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &srv_idx)",
+       err = bpf_map_lookup_elem(tp_map_fd, &egress_cli_idx, &cli_tp);
+       CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &egress_cli_idx)",
              "err:%d errno:%d", err, errno);
 
-       err = bpf_map_lookup_elem(sk_map_fd, &cli_idx, &cli_sk);
-       CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &cli_idx)",
+       err = bpf_map_lookup_elem(sk_map_fd, &ingress_listen_idx, &listen_sk);
+       CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &ingress_listen_idx)",
              "err:%d errno:%d", err, errno);
-       err = bpf_map_lookup_elem(tp_map_fd, &cli_idx, &cli_tp);
-       CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &cli_idx)",
+       err = bpf_map_lookup_elem(tp_map_fd, &ingress_listen_idx, &listen_tp);
+       CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &ingress_listen_idx)",
              "err:%d errno:%d", err, errno);
 
+       printf("listen_sk: ");
+       print_sk(&listen_sk);
+       printf("\n");
+
        printf("srv_sk: ");
        print_sk(&srv_sk);
        printf("\n");
@@ -124,6 +162,10 @@ static void check_result(void)
        print_sk(&cli_sk);
        printf("\n");
 
+       printf("listen_tp: ");
+       print_tp(&listen_tp);
+       printf("\n");
+
        printf("srv_tp: ");
        print_tp(&srv_tp);
        printf("\n");
@@ -132,6 +174,19 @@ static void check_result(void)
        print_tp(&cli_tp);
        printf("\n");
 
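+       /* listen_sk state 10 is TCP_LISTEN (see include/net/tcp_states.h) */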
+       CHECK(listen_sk.state != 10 ||
+             listen_sk.family != AF_INET6 ||
+             listen_sk.protocol != IPPROTO_TCP ||
+             memcmp(listen_sk.src_ip6, &in6addr_loopback,
+                    sizeof(listen_sk.src_ip6)) ||
+             listen_sk.dst_ip6[0] || listen_sk.dst_ip6[1] ||
+             listen_sk.dst_ip6[2] || listen_sk.dst_ip6[3] ||
+             listen_sk.src_port != ntohs(srv_sa6.sin6_port) ||
+             listen_sk.dst_port,
+             "Unexpected listen_sk",
+             "Check listen_sk output. ingress_linum:%u",
+             ingress_linum);
+
        CHECK(srv_sk.state == 10 ||
              !srv_sk.state ||
              srv_sk.family != AF_INET6 ||
@@ -142,7 +197,8 @@ static void check_result(void)
                     sizeof(srv_sk.dst_ip6)) ||
              srv_sk.src_port != ntohs(srv_sa6.sin6_port) ||
              srv_sk.dst_port != cli_sa6.sin6_port,
-             "Unexpected srv_sk", "Check srv_sk output. linum:%u", linum);
+             "Unexpected srv_sk", "Check srv_sk output. egress_linum:%u",
+             egress_linum);
 
        CHECK(cli_sk.state == 10 ||
              !cli_sk.state ||
@@ -154,21 +210,31 @@ static void check_result(void)
                     sizeof(cli_sk.dst_ip6)) ||
              cli_sk.src_port != ntohs(cli_sa6.sin6_port) ||
              cli_sk.dst_port != srv_sa6.sin6_port,
-             "Unexpected cli_sk", "Check cli_sk output. linum:%u", linum);
+             "Unexpected cli_sk", "Check cli_sk output. egress_linum:%u",
+             egress_linum);
+
+       CHECK(listen_tp.data_segs_out ||
+             listen_tp.data_segs_in ||
+             listen_tp.total_retrans ||
+             listen_tp.bytes_acked,
+             "Unexpected listen_tp", "Check listen_tp output. ingress_linum:%u",
+             ingress_linum);
 
        CHECK(srv_tp.data_segs_out != 1 ||
              srv_tp.data_segs_in ||
              srv_tp.snd_cwnd != 10 ||
              srv_tp.total_retrans ||
              srv_tp.bytes_acked != DATA_LEN,
-             "Unexpected srv_tp", "Check srv_tp output. linum:%u", linum);
+             "Unexpected srv_tp", "Check srv_tp output. egress_linum:%u",
+             egress_linum);
 
        CHECK(cli_tp.data_segs_out ||
              cli_tp.data_segs_in != 1 ||
              cli_tp.snd_cwnd != 10 ||
              cli_tp.total_retrans ||
              cli_tp.bytes_received != DATA_LEN,
-             "Unexpected cli_tp", "Check cli_tp output. linum:%u", linum);
+             "Unexpected cli_tp", "Check cli_tp output. egress_linum:%u",
+             egress_linum);
 }
 
 static void test(void)
@@ -211,10 +277,10 @@ static void test(void)
              err, errno);
 
        /* Update addr_map with srv_sa6 and cli_sa6 */
-       err = bpf_map_update_elem(addr_map_fd, &srv_idx, &srv_sa6, 0);
+       err = bpf_map_update_elem(addr_map_fd, &addr_srv_idx, &srv_sa6, 0);
        CHECK(err, "map_update", "err:%d errno:%d", err, errno);
 
-       err = bpf_map_update_elem(addr_map_fd, &cli_idx, &cli_sa6, 0);
+       err = bpf_map_update_elem(addr_map_fd, &addr_cli_idx, &cli_sa6, 0);
        CHECK(err, "map_update", "err:%d errno:%d", err, errno);
 
        /* Connect from cli_sa6 to srv_sa6 */
@@ -273,9 +339,9 @@ int main(int argc, char **argv)
        struct bpf_prog_load_attr attr = {
                .file = "test_sock_fields_kern.o",
                .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
-               .expected_attach_type = BPF_CGROUP_INET_EGRESS,
        };
-       int cgroup_fd, prog_fd, err;
+       int cgroup_fd, egress_fd, ingress_fd, err;
+       struct bpf_program *ingress_prog;
        struct bpf_object *obj;
        struct bpf_map *map;
 
@@ -293,12 +359,24 @@ int main(int argc, char **argv)
        err = join_cgroup(TEST_CGROUP);
        CHECK(err, "join_cgroup", "err:%d errno:%d", err, errno);
 
-       err = bpf_prog_load_xattr(&attr, &obj, &prog_fd);
+       err = bpf_prog_load_xattr(&attr, &obj, &egress_fd);
        CHECK(err, "bpf_prog_load_xattr()", "err:%d", err);
 
-       err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0);
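+       /* The egress fd came from bpf_prog_load_xattr(); look the ingress
+        * program up by its section title.
+        */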
+       ingress_prog = bpf_object__find_program_by_title(obj,
+                                                        "cgroup_skb/ingress");
+       CHECK(!ingress_prog,
+             "bpf_object__find_program_by_title(cgroup_skb/ingress)",
+             "not found");
+       ingress_fd = bpf_program__fd(ingress_prog);
+
+       err = bpf_prog_attach(egress_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0);
        CHECK(err == -1, "bpf_prog_attach(BPF_CGROUP_INET_EGRESS)",
              "err:%d errno:%d", err, errno);
+
+       err = bpf_prog_attach(ingress_fd, cgroup_fd,
+                             BPF_CGROUP_INET_INGRESS, 0);
+       CHECK(err == -1, "bpf_prog_attach(BPF_CGROUP_INET_INGRESS)",
+             "err:%d errno:%d", err, errno);
        close(cgroup_fd);
 
        map = bpf_object__find_map_by_name(obj, "addr_map");
index 4004891afa9c3dd7969419bcd8d10d4b9b9541c8..9093a8f64dc6105b34c7f35d61e14bb18192c610 100644 (file)
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "calls: ptr null check in subprog",
+       .insns = {
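+       /* main: look up a map value; the subprog below does the NULL check */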
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
+       BPF_EXIT_INSN(),
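+       /* subprog: r0 = (r1 == NULL) ? 0 : 1 */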
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+       .fixup_map_hash_48b = { 3 },
+       .result_unpriv = REJECT,
+       .result = ACCEPT,
+       .retval = 0,
+},
 {
        "calls: two calls with args",
        .insns = {
        .errstr = "call stack",
        .result = REJECT,
 },
+{
+       "calls: stack depth check in dead code",
+       .insns = {
+       /* main */
+       BPF_MOV64_IMM(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
+       BPF_EXIT_INSN(),
+       /* A */
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       /* B */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
+       BPF_EXIT_INSN(),
+       /* C */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
+       BPF_EXIT_INSN(),
+       /* D */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
+       BPF_EXIT_INSN(),
+       /* E */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
+       BPF_EXIT_INSN(),
+       /* F */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
+       BPF_EXIT_INSN(),
+       /* G */
+       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
+       BPF_EXIT_INSN(),
+       /* H */
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .errstr = "call stack",
+       .result = REJECT,
+},
 {
        "calls: spill into caller stack frame",
        .insns = {
        .errstr = "!read_ok",
        .result = REJECT,
 },
+{
+       "calls: cross frame pruning - liveness propagation",
+       .insns = {
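+       /* main: r8/r9 latch random outcomes; the load from uninitialized r2
+        * below must stay reachable even after cross-frame state pruning.
+        */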
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_MOV64_IMM(BPF_REG_8, 0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_MOV64_IMM(BPF_REG_8, 1),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_MOV64_IMM(BPF_REG_9, 0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_MOV64_IMM(BPF_REG_9, 1),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
+       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
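+       /* subprog: the JEQ falls through either way; it only marks r1 live */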
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+       .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+       .errstr = "!read_ok",
+       .result = REJECT,
+},
index e3fc22e672c2735760800c3f7da78e4f4e6570cc..d5c596fdc4b9a67127c36b56c3d24edd58348e2a 100644 (file)
        .errstr = "invalid access to packet",
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
 },
+{
+       "direct packet access: test29 (reg > pkt_end in subprog)",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+                   offsetof(struct __sk_buff, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct __sk_buff, data_end)),
+       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
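+       /* subprog: r0 = (r3 > pkt_end) ? 0 : 1, i.e. 1 when pkt + 8 is in bounds */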
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
index 3ed3593bd8b61f4301b03fc9f06b97af4e8be17f..923f2110072d6f1f4a124824228c082d43f094d8 100644 (file)
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = ACCEPT,
 },
+{
+       "reference tracking: use ptr from bpf_tcp_sock() after release",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = REJECT,
+       .errstr = "invalid mem access",
+},
+{
+       "reference tracking: use ptr from bpf_sk_fullsock() after release",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = REJECT,
+       .errstr = "invalid mem access",
+},
+{
+       "reference tracking: use ptr from bpf_sk_fullsock(tp) after release",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = REJECT,
+       .errstr = "invalid mem access",
+},
+{
+       "reference tracking: use sk after bpf_sk_release(tp)",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = REJECT,
+       .errstr = "invalid mem access",
+},
+{
+       "reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, src_port)),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = ACCEPT,
+},
+{
+       "reference tracking: bpf_sk_release(listen_sk)",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = REJECT,
+       .errstr = "reference has not been acquired before",
+},
+{
+       /* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
+       "reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+       BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_8, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = REJECT,
+       .errstr = "invalid mem access",
+},
index 0ddfdf76aba5a56f387f9f43f567cc69009c672b..416436231fab011aeebfbed0f0f253ad25bef912 100644 (file)
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = REJECT,
-       .errstr = "type=sock_common expected=sock",
+       .errstr = "reference has not been acquired before",
 },
 {
        "bpf_sk_release(bpf_sk_fullsock(skb->sk))",
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = REJECT,
-       .errstr = "type=tcp_sock expected=sock",
+       .errstr = "reference has not been acquired before",
 },
index c4cf6e6d800ebe3d2d595805397ce1b3f70e7de3..a6c196c8534cea2a49d600aa23b39070bfd481f0 100755 (executable)
@@ -11,6 +11,7 @@ lib_dir=$(dirname $0)/../../../net/forwarding
 
 ALL_TESTS="
        rif_set_addr_test
+       rif_vrf_set_addr_test
        rif_inherit_bridge_addr_test
        rif_non_inherit_bridge_addr_test
        vlan_interface_deletion_test
@@ -98,6 +99,25 @@ rif_set_addr_test()
        ip link set dev $swp1 addr $swp1_mac
 }
 
+rif_vrf_set_addr_test()
+{
+       # Test that it is possible to set an IP address on a VRF upper despite
+       # its random MAC address.
+       RET=0
+
+       ip link add name vrf-test type vrf table 10
+       ip link set dev $swp1 master vrf-test
+
+       ip -4 address add 192.0.2.1/24 dev vrf-test
+       check_err $? "failed to set IPv4 address on VRF"
+       ip -6 address add 2001:db8:1::1/64 dev vrf-test
+       check_err $? "failed to set IPv6 address on VRF"
+
+       log_test "RIF - setting IP address on VRF"
+
+       ip link del dev vrf-test
+}
+
 rif_inherit_bridge_addr_test()
 {
        RET=0
index 3c1f4bdf90000c7f163fd72a2974bfabf982c47f..f8588cca2bef4bfe4d3cdf2afdb6586f21e67894 100644 (file)
@@ -1,3 +1,5 @@
+include ../../../../scripts/Kbuild.include
+
 all:
 
 top_srcdir = ../../../..
@@ -17,6 +19,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/state_test
 TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
 TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
+TEST_GEN_PROGS_x86_64 += x86_64/smm_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
 TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
 
@@ -29,8 +32,12 @@ LIBKVM += $(LIBKVM_$(UNAME_M))
 INSTALL_HDR_PATH = $(top_srcdir)/usr
 LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
 LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
-CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
-LDFLAGS += -pthread
+CFLAGS += -O2 -g -std=gnu99 -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
+
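+# Probe whether $(CC) understands -no-pie; guest code in these selftests
+# relies on fixed load addresses, so PIE binaries must be avoided.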
+no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
+        $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
+
+LDFLAGS += -pthread $(no-pie-option)
 
 # After inclusion, $(OUTPUT) is defined and
 # $(TEST_GEN_PROGS) starts with $(OUTPUT)/
index 4715cfba20dce6b58f1141bcee97680dfd9aa35f..93f99c6b7d79ee11964457b5a845cef4380bab27 100644 (file)
@@ -288,8 +288,11 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 #endif
        max_gfn = (1ul << (guest_pa_bits - guest_page_shift)) - 1;
        guest_page_size = (1ul << guest_page_shift);
-       /* 1G of guest page sized pages */
-       guest_num_pages = (1ul << (30 - guest_page_shift));
+       /*
+        * A little more than 1G of guest page sized pages.  Cover the
+        * case where the size is not aligned to 64 pages.
+        */
+       guest_num_pages = (1ul << (30 - guest_page_shift)) + 3;
        host_page_size = getpagesize();
        host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
                         !!((guest_num_pages * guest_page_size) % host_page_size);
@@ -359,7 +362,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
                kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
 #ifdef USE_CLEAR_DIRTY_LOG
                kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
-                                      DIV_ROUND_UP(host_num_pages, 64) * 64);
+                                      host_num_pages);
 #endif
                vm_dirty_log_verify(bmap);
                iteration++;
index a84785b0255776eb67b5df72df87575710f6cfaa..07b71ad9734af57f101f0f96fdc57b741b41b0e7 100644 (file)
@@ -102,6 +102,7 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
 struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
 void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
 int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
+void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid);
 void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
                       struct kvm_mp_state *mp_state);
 void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
index e2884c2b81fff80c1ec6c261828dbb0493b3e98b..6063d5b2f3561c450778f86f3d1474390d79b5ec 100644 (file)
@@ -778,6 +778,33 @@ void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
 #define MSR_IA32_APICBASE_ENABLE       (1<<11)
 #define MSR_IA32_APICBASE_BASE         (0xfffff<<12)
 
+#define APIC_BASE_MSR  0x800
+#define X2APIC_ENABLE  (1UL << 10)
+#define        APIC_ICR        0x300
+#define                APIC_DEST_SELF          0x40000
+#define                APIC_DEST_ALLINC        0x80000
+#define                APIC_DEST_ALLBUT        0xC0000
+#define                APIC_ICR_RR_MASK        0x30000
+#define                APIC_ICR_RR_INVALID     0x00000
+#define                APIC_ICR_RR_INPROG      0x10000
+#define                APIC_ICR_RR_VALID       0x20000
+#define                APIC_INT_LEVELTRIG      0x08000
+#define                APIC_INT_ASSERT         0x04000
+#define                APIC_ICR_BUSY           0x01000
+#define                APIC_DEST_LOGICAL       0x00800
+#define                APIC_DEST_PHYSICAL      0x00000
+#define                APIC_DM_FIXED           0x00000
+#define                APIC_DM_FIXED_MASK      0x00700
+#define                APIC_DM_LOWEST          0x00100
+#define                APIC_DM_SMI             0x00200
+#define                APIC_DM_REMRD           0x00300
+#define                APIC_DM_NMI             0x00400
+#define                APIC_DM_INIT            0x00500
+#define                APIC_DM_STARTUP         0x00600
+#define                APIC_DM_EXTINT          0x00700
+#define                APIC_VECTOR_MASK        0x000FF
+#define        APIC_ICR2       0x310
+
 #define MSR_IA32_TSCDEADLINE           0x000006e0
 
 #define MSR_IA32_UCODE_WRITE           0x00000079
index b52cfdefecbfe9f760b569baf757d2228fe0c6af..4ca96b228e46ba248476803583cb94d14410ff16 100644 (file)
@@ -91,6 +91,11 @@ static void vm_open(struct kvm_vm *vm, int perm, unsigned long type)
        if (vm->kvm_fd < 0)
                exit(KSFT_SKIP);
 
+       if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
+               fprintf(stderr, "immediate_exit not available, skipping test\n");
+               exit(KSFT_SKIP);
+       }
+
        vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, type);
        TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
                "rc: %i errno: %i", vm->fd, errno);
@@ -1121,6 +1126,22 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
        return rc;
 }
 
+void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
+{
+       struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+       int ret;
+
+       TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+
+       vcpu->state->immediate_exit = 1;
+       ret = ioctl(vcpu->fd, KVM_RUN, NULL);
+       vcpu->state->immediate_exit = 0;
+
+       TEST_ASSERT(ret == -1 && errno == EINTR,
+                   "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
+                   ret, errno);
+}
+
 /*
  * VM VCPU Set MP State
  *
index f28127f4a3af63cb9ac15d2124f425e7492fccda..dc7fae9fa424cf2b45fb7acf10c4b58c272763a0 100644 (file)
@@ -1030,6 +1030,14 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
                            nested_size, sizeof(state->nested_));
        }
 
+       /*
+        * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
+        * guest state is consistent only after userspace re-enters the
+        * kernel with KVM_RUN.  Complete IO prior to migrating state
+        * to a new VM.
+        */
+       vcpu_run_complete_io(vm, vcpuid);
+
        nmsrs = kvm_get_num_msrs(vm);
        list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
        list->nmsrs = nmsrs;
@@ -1093,12 +1101,6 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        int r;
 
-       if (state->nested.size) {
-               r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
-               TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
-                       r);
-       }
-
        r = ioctl(vcpu->fd, KVM_SET_XSAVE, &state->xsave);
         TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
                 r);
@@ -1130,4 +1132,10 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
        r = ioctl(vcpu->fd, KVM_SET_REGS, &state->regs);
         TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_REGS, r: %i",
                 r);
+
+       if (state->nested.size) {
+               r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
+               TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
+                       r);
+       }
 }
index d503a51fad307526397f24a333080065e1ab4e1b..7c2c4d4055a80bf49bc56870a1ba828df8ee577d 100644 (file)
@@ -87,22 +87,25 @@ int main(int argc, char *argv[])
        while (1) {
                rc = _vcpu_run(vm, VCPU_ID);
 
-               if (run->exit_reason == KVM_EXIT_IO) {
-                       switch (get_ucall(vm, VCPU_ID, &uc)) {
-                       case UCALL_SYNC:
-                               /* emulate hypervisor clearing CR4.OSXSAVE */
-                               vcpu_sregs_get(vm, VCPU_ID, &sregs);
-                               sregs.cr4 &= ~X86_CR4_OSXSAVE;
-                               vcpu_sregs_set(vm, VCPU_ID, &sregs);
-                               break;
-                       case UCALL_ABORT:
-                               TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
-                               break;
-                       case UCALL_DONE:
-                               goto done;
-                       default:
-                               TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
-                       }
+               TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+                           "Unexpected exit reason: %u (%s)\n",
+                           run->exit_reason,
+                           exit_reason_str(run->exit_reason));
+
+               switch (get_ucall(vm, VCPU_ID, &uc)) {
+               case UCALL_SYNC:
+                       /* emulate hypervisor clearing CR4.OSXSAVE */
+                       vcpu_sregs_get(vm, VCPU_ID, &sregs);
+                       sregs.cr4 &= ~X86_CR4_OSXSAVE;
+                       vcpu_sregs_set(vm, VCPU_ID, &sregs);
+                       break;
+               case UCALL_ABORT:
+                       TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
+                       break;
+               case UCALL_DONE:
+                       goto done;
+               default:
+                       TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
                }
        }
 
index c49c2a28b0eb290ccd6c51498a0b9fd716b58b07..36669684eca58a6c09140453f70a403cf0119348 100644 (file)
@@ -123,8 +123,6 @@ int main(int argc, char *argv[])
                            stage, run->exit_reason,
                            exit_reason_str(run->exit_reason));
 
-               memset(&regs1, 0, sizeof(regs1));
-               vcpu_regs_get(vm, VCPU_ID, &regs1);
                switch (get_ucall(vm, VCPU_ID, &uc)) {
                case UCALL_ABORT:
                        TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
@@ -144,6 +142,9 @@ int main(int argc, char *argv[])
                            stage, (ulong)uc.args[1]);
 
                state = vcpu_save_state(vm, VCPU_ID);
+               memset(&regs1, 0, sizeof(regs1));
+               vcpu_regs_get(vm, VCPU_ID, &regs1);
+
                kvm_vm_release(vm);
 
                /* Restore state in a new VM.  */
index 264425f75806b9e41e5bcf69d34a59c47889a146..9a21e912097c4c41d66873af36160389b8b81956 100644 (file)
@@ -141,7 +141,13 @@ int main(int argc, char *argv[])
 
        free(hv_cpuid_entries);
 
-       vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
+       rv = _vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
+
+       if (rv) {
+               fprintf(stderr,
+                       "Enlightened VMCS is unsupported, skipping related tests\n");
+               goto vm_free;
+       }
 
        hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm);
        if (!hv_cpuid_entries)
@@ -151,6 +157,7 @@ int main(int argc, char *argv[])
 
        free(hv_cpuid_entries);
 
+vm_free:
        kvm_vm_free(vm);
 
        return 0;
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
new file mode 100644 (file)
index 0000000..fb80869
--- /dev/null
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018, Red Hat, Inc.
+ *
+ * Tests for SMM.
+ */
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+
+#include "kvm_util.h"
+
+#include "vmx.h"
+
+#define VCPU_ID              1
+
+#define PAGE_SIZE  4096
+
+#define SMRAM_SIZE 65536
+#define SMRAM_MEMSLOT ((1 << 16) | 1)
+#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
+#define SMRAM_GPA 0x1000000
+#define SMRAM_STAGE 0xfe
+
+#define STR(x) #x
+#define XSTR(s) STR(s)
+
+#define SYNC_PORT 0xe
+#define DONE 0xff
+
+/*
+ * This is compiled as normal 64-bit code; the SMI handler, however, is
+ * executed in real-address mode. To keep things simple we limit ourselves
+ * to a mode-independent subset of asm here.
+ * The SMI handler always reports back the fixed stage SMRAM_STAGE.
+ */
+uint8_t smi_handler[] = {
+       0xb0, SMRAM_STAGE,    /* mov $SMRAM_STAGE, %al */
+       0xe4, SYNC_PORT,      /* in $SYNC_PORT, %al */
+       0x0f, 0xaa,           /* rsm */
+};
+
+void sync_with_host(uint64_t phase)
+{
+       asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
+                    : : "a" (phase));
+}
+
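+/* Trigger an SMI on the current vCPU via a self-directed x2APIC IPI. */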
+void self_smi(void)
+{
+       wrmsr(APIC_BASE_MSR + (APIC_ICR >> 4),
+             APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
+}
+
+void guest_code(struct vmx_pages *vmx_pages)
+{
+       uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
+
+       sync_with_host(1);
+
+       wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE);
+
+       sync_with_host(2);
+
+       self_smi();
+
+       sync_with_host(4);
+
+       if (vmx_pages) {
+               GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+
+               sync_with_host(5);
+
+               self_smi();
+
+               sync_with_host(7);
+       }
+
+       sync_with_host(DONE);
+}
+
+int main(int argc, char *argv[])
+{
+       struct vmx_pages *vmx_pages = NULL;
+       vm_vaddr_t vmx_pages_gva = 0;
+
+       struct kvm_regs regs;
+       struct kvm_vm *vm;
+       struct kvm_run *run;
+       struct kvm_x86_state *state;
+       int stage, stage_reported;
+
+       /* Create VM */
+       vm = vm_create_default(VCPU_ID, 0, guest_code);
+
+       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+       run = vcpu_state(vm, VCPU_ID);
+
+       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
+                                   SMRAM_MEMSLOT, SMRAM_PAGES, 0);
+       TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
+                   == SMRAM_GPA, "could not allocate guest physical addresses?");
+
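+       /* Zero SMRAM and copy the handler to the SMI entry point, SMBASE + 0x8000. */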
+       memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
+       memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
+              sizeof(smi_handler));
+
+       vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);
+
+       if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
+               vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+               vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+       } else {
+               printf("will skip the VMX part of the SMM test\n");
+               vcpu_args_set(vm, VCPU_ID, 1, 0);
+       }
+
+       for (stage = 1;; stage++) {
+               _vcpu_run(vm, VCPU_ID);
+               TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+                           "Stage %d: unexpected exit reason: %u (%s)\n",
+                           stage, run->exit_reason,
+                           exit_reason_str(run->exit_reason));
+
+               memset(&regs, 0, sizeof(regs));
+               vcpu_regs_get(vm, VCPU_ID, &regs);
+
+               stage_reported = regs.rax & 0xff;
+
+               if (stage_reported == DONE)
+                       goto done;
+
+               TEST_ASSERT(stage_reported == stage ||
+                           stage_reported == SMRAM_STAGE,
+                           "Unexpected stage: #%x, got %x",
+                           stage, stage_reported);
+
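+               /* Save the vCPU state, recreate the VM, and restore the state. */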
+               state = vcpu_save_state(vm, VCPU_ID);
+               kvm_vm_release(vm);
+               kvm_vm_restart(vm, O_RDWR);
+               vm_vcpu_add(vm, VCPU_ID, 0, 0);
+               vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+               vcpu_load_state(vm, VCPU_ID, state);
+               run = vcpu_state(vm, VCPU_ID);
+               free(state);
+       }
+
+done:
+       kvm_vm_free(vm);
+}
index 4b3f556265f1b95790e318da95adfb4073526514..e0a3c0204b7cd11c5da7024bea68f0da71e41bab 100644 (file)
@@ -156,8 +156,6 @@ int main(int argc, char *argv[])
                            stage, run->exit_reason,
                            exit_reason_str(run->exit_reason));
 
-               memset(&regs1, 0, sizeof(regs1));
-               vcpu_regs_get(vm, VCPU_ID, &regs1);
                switch (get_ucall(vm, VCPU_ID, &uc)) {
                case UCALL_ABORT:
                        TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
@@ -177,6 +175,9 @@ int main(int argc, char *argv[])
                            stage, (ulong)uc.args[1]);
 
                state = vcpu_save_state(vm, VCPU_ID);
+               memset(&regs1, 0, sizeof(regs1));
+               vcpu_regs_get(vm, VCPU_ID, &regs1);
+
                kvm_vm_release(vm);
 
                /* Restore state in a new VM.  */
index d4cfb6a7a086d57eda4e0854b8e8ea1fe38d4527..4b7e107865bf3cef0faebbb0dd82fe3a0eb8575d 100755 (executable)
@@ -27,6 +27,7 @@ log_test()
                nsuccess=$((nsuccess+1))
                printf "\n    TEST: %-50s  [ OK ]\n" "${msg}"
        else
+               ret=1
                nfail=$((nfail+1))
                printf "\n    TEST: %-50s  [FAIL]\n" "${msg}"
                if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
@@ -147,8 +148,8 @@ fib_rule6_test()
 
        fib_check_iproute_support "ipproto" "ipproto"
        if [ $? -eq 0 ]; then
-               match="ipproto icmp"
-               fib_rule6_test_match_n_redirect "$match" "$match" "ipproto icmp match"
+               match="ipproto ipv6-icmp"
+               fib_rule6_test_match_n_redirect "$match" "$match" "ipproto ipv6-icmp match"
        fi
 }
 
@@ -245,4 +246,9 @@ setup
 run_fibrule_tests
 cleanup
 
+if [ "$TESTS" != "none" ]; then
+       printf "\nTests passed: %3d\n" ${nsuccess}
+       printf "Tests failed: %3d\n"   ${nfail}
+fi
+
 exit $ret
index 1080ff55a788f720f240271741fbc38680061b7a..0d2a5f4f1e63829f3ca8dfcd8695b91409823f7f 100755 (executable)
@@ -605,6 +605,39 @@ run_cmd()
        return $rc
 }
 
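+# compare actual output in $1 with expected output in $2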
+check_expected()
+{
+       local out="$1"
+       local expected="$2"
+       local rc=0
+
+       [ "${out}" = "${expected}" ] && return 0
+
+       if [ -z "${out}" ]; then
+               if [ "$VERBOSE" = "1" ]; then
+                       printf "\nNo route entry found\n"
+                       printf "Expected:\n"
+                       printf "    ${expected}\n"
+               fi
+               return 1
+       fi
+
+       # tricky way to convert output to 1-line without ip's
+       # messy '\'; this drops all extra white space
+       out=$(echo ${out})
+       if [ "${out}" != "${expected}" ]; then
+               rc=1
+               if [ "${VERBOSE}" = "1" ]; then
+                       printf "    Unexpected route entry. Have:\n"
+                       printf "        ${out}\n"
+                       printf "    Expected:\n"
+                       printf "        ${expected}\n\n"
+               fi
+       fi
+
+       return $rc
+}
+
 # add route for a prefix, flushing any existing routes first
 # expected to be the first step of a test
 add_route6()
@@ -652,31 +685,7 @@ check_route6()
        pfx=$1
 
        out=$($IP -6 ro ls match ${pfx} | sed -e 's/ pref medium//')
-       [ "${out}" = "${expected}" ] && return 0
-
-       if [ -z "${out}" ]; then
-               if [ "$VERBOSE" = "1" ]; then
-                       printf "\nNo route entry found\n"
-                       printf "Expected:\n"
-                       printf "    ${expected}\n"
-               fi
-               return 1
-       fi
-
-       # tricky way to convert output to 1-line without ip's
-       # messy '\'; this drops all extra white space
-       out=$(echo ${out})
-       if [ "${out}" != "${expected}" ]; then
-               rc=1
-               if [ "${VERBOSE}" = "1" ]; then
-                       printf "    Unexpected route entry. Have:\n"
-                       printf "        ${out}\n"
-                       printf "    Expected:\n"
-                       printf "        ${expected}\n\n"
-               fi
-       fi
-
-       return $rc
+       check_expected "${out}" "${expected}"
 }
 
 route_cleanup()
@@ -725,7 +734,7 @@ route_setup()
        ip -netns ns2 addr add 172.16.103.2/24 dev veth4
        ip -netns ns2 addr add 172.16.104.1/24 dev dummy1
 
-       set +ex
+       set +e
 }
 
 # assumption is that basic add of a single path route works
@@ -960,7 +969,8 @@ ipv6_addr_metric_test()
        run_cmd "$IP li set dev dummy2 down"
        rc=$?
        if [ $rc -eq 0 ]; then
-               check_route6 ""
+               out=$($IP -6 ro ls match 2001:db8:104::/64)
+               check_expected "${out}" ""
                rc=$?
        fi
        log_test $rc 0 "Prefix route removed on link down"
@@ -1091,38 +1101,13 @@ check_route()
        local pfx
        local expected="$1"
        local out
-       local rc=0
 
        set -- $expected
        pfx=$1
        [ "${pfx}" = "unreachable" ] && pfx=$2
 
        out=$($IP ro ls match ${pfx})
-       [ "${out}" = "${expected}" ] && return 0
-
-       if [ -z "${out}" ]; then
-               if [ "$VERBOSE" = "1" ]; then
-                       printf "\nNo route entry found\n"
-                       printf "Expected:\n"
-                       printf "    ${expected}\n"
-               fi
-               return 1
-       fi
-
-       # tricky way to convert output to 1-line without ip's
-       # messy '\'; this drops all extra white space
-       out=$(echo ${out})
-       if [ "${out}" != "${expected}" ]; then
-               rc=1
-               if [ "${VERBOSE}" = "1" ]; then
-                       printf "    Unexpected route entry. Have:\n"
-                       printf "        ${out}\n"
-                       printf "    Expected:\n"
-                       printf "        ${expected}\n\n"
-               fi
-       fi
-
-       return $rc
+       check_expected "${out}" "${expected}"
 }
 
 # assumption is that basic add of a single path route works
@@ -1387,7 +1372,8 @@ ipv4_addr_metric_test()
        run_cmd "$IP li set dev dummy2 down"
        rc=$?
        if [ $rc -eq 0 ]; then
-               check_route ""
+               out=$($IP ro ls match 172.16.104.0/24)
+               check_expected "${out}" ""
                rc=$?
        fi
        log_test $rc 0 "Prefix route removed on link down"
index 2dc95fda7ef76e7b723fb91d9a68f44bcb6a2897..ea5938ec009a5eb9e28cb1778e081a568e66fd65 100755 (executable)
@@ -6,12 +6,14 @@ if [ $(id -u) != 0 ]; then
        exit 0
 fi
 
+ret=0
 echo "--------------------"
 echo "running psock_fanout test"
 echo "--------------------"
 ./in_netns.sh ./psock_fanout
 if [ $? -ne 0 ]; then
        echo "[FAIL]"
+       ret=1
 else
        echo "[PASS]"
 fi
@@ -22,6 +24,7 @@ echo "--------------------"
 ./in_netns.sh ./psock_tpacket
 if [ $? -ne 0 ]; then
        echo "[FAIL]"
+       ret=1
 else
        echo "[PASS]"
 fi
@@ -32,6 +35,8 @@ echo "--------------------"
 ./in_netns.sh ./txring_overwrite
 if [ $? -ne 0 ]; then
        echo "[FAIL]"
+       ret=1
 else
        echo "[PASS]"
 fi
+exit $ret
index b093f39c298c3f4d7ee43eed7c58772860f55da4..14e41faf2c5740633f9dd30e500543647b736734 100755 (executable)
@@ -7,7 +7,7 @@ echo "--------------------"
 ./socket
 if [ $? -ne 0 ]; then
        echo "[FAIL]"
+       exit 1
 else
        echo "[PASS]"
 fi
-
index c9ff2b47bd1ca3a2f70ee0683cb2b79b170c74f5..a37cb1192c6a6bc6080c829b63768e6ba52f8dd1 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for netfilter selftests
 
-TEST_PROGS := nft_trans_stress.sh nft_nat.sh
+TEST_PROGS := nft_trans_stress.sh nft_nat.sh conntrack_icmp_related.sh
 
 include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
new file mode 100755 (executable)
index 0000000..b48e183
--- /dev/null
@@ -0,0 +1,283 @@
+#!/bin/bash
+#
+# check that ICMP df-needed/pkttoobig icmp errors are set as RELATED
+# state
+#
+# Setup is:
+#
+# nsclient1 -> nsrouter1 -> nsrouter2 -> nsclient2
+# MTU 1500, except for nsrouter2 <-> nsclient2 link (1280).
+# ping nsclient2 from nsclient1, checking that conntrack sets the RELATED
+# state on the resulting 'fragmentation needed' icmp packet.
+#
+# In addition, nsrouter1 will perform IP masquerading, i.e. also
+# check the icmp errors are propagated to the correct host as per
+# nat of "established" icmp-echo "connection".
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without nft tool"
+       exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without ip tool"
+       exit $ksft_skip
+fi
+
+cleanup() {
+       for i in 1 2;do ip netns del nsclient$i;done
+       for i in 1 2;do ip netns del nsrouter$i;done
+}
+
+ipv4() {
+    echo -n 192.168.$1.2
+}
+
+ipv6 () {
+    echo -n dead:$1::2
+}
+
+check_counter()
+{
+       ns=$1
+       name=$2
+       expect=$3
+       local lret=0
+
+       ip netns exec $ns nft list counter inet filter "$name" | grep -q "$expect"
+       if [ $? -ne 0 ]; then
+               echo "ERROR: counter $name in $ns has unexpected value (expected $expect)" 1>&2
+               ip netns exec $ns nft list counter inet filter "$name" 1>&2
+               lret=1
+       fi
+
+       return $lret
+}
+
+check_unknown()
+{
+       expect="packets 0 bytes 0"
+       for n in nsclient1 nsclient2 nsrouter1 nsrouter2; do
+               check_counter $n "unknown" "$expect"
+               if [ $? -ne 0 ] ;then
+                       return 1
+               fi
+       done
+
+       return 0
+}
+
+for n in nsclient1 nsclient2 nsrouter1 nsrouter2; do
+  ip netns add $n
+  ip -net $n link set lo up
+done
+
+DEV=veth0
+ip link add $DEV netns nsclient1 type veth peer name eth1 netns nsrouter1
+DEV=veth0
+ip link add $DEV netns nsclient2 type veth peer name eth1 netns nsrouter2
+
+DEV=veth0
+ip link add $DEV netns nsrouter1 type veth peer name eth2 netns nsrouter2
+
+DEV=veth0
+for i in 1 2; do
+    ip -net nsclient$i link set $DEV up
+    ip -net nsclient$i addr add $(ipv4 $i)/24 dev $DEV
+    ip -net nsclient$i addr add $(ipv6 $i)/64 dev $DEV
+done
+
+ip -net nsrouter1 link set eth1 up
+ip -net nsrouter1 link set veth0 up
+
+ip -net nsrouter2 link set eth1 up
+ip -net nsrouter2 link set eth2 up
+
+ip -net nsclient1 route add default via 192.168.1.1
+ip -net nsclient1 -6 route add default via dead:1::1
+
+ip -net nsclient2 route add default via 192.168.2.1
+ip -net nsclient2 route add default via dead:2::1
+
+i=3
+ip -net nsrouter1 addr add 192.168.1.1/24 dev eth1
+ip -net nsrouter1 addr add 192.168.3.1/24 dev veth0
+ip -net nsrouter1 addr add dead:1::1/64 dev eth1
+ip -net nsrouter1 addr add dead:3::1/64 dev veth0
+ip -net nsrouter1 route add default via 192.168.3.10
+ip -net nsrouter1 -6 route add default via dead:3::10
+
+ip -net nsrouter2 addr add 192.168.2.1/24 dev eth1
+ip -net nsrouter2 addr add 192.168.3.10/24 dev eth2
+ip -net nsrouter2 addr add dead:2::1/64 dev eth1
+ip -net nsrouter2 addr add dead:3::10/64 dev eth2
+ip -net nsrouter2 route add default via 192.168.3.1
+ip -net nsrouter2 route add default via dead:3::1
+
+sleep 2
+for i in 4 6; do
+       ip netns exec nsrouter1 sysctl -q net.ipv$i.conf.all.forwarding=1
+       ip netns exec nsrouter2 sysctl -q net.ipv$i.conf.all.forwarding=1
+done
+
+for netns in nsrouter1 nsrouter2; do
+ip netns exec $netns nft -f - <<EOF
+table inet filter {
+       counter unknown { }
+       counter related { }
+       chain forward {
+               type filter hook forward priority 0; policy accept;
+               meta l4proto icmpv6 icmpv6 type "packet-too-big" ct state "related" counter name "related" accept
+               meta l4proto icmp icmp type "destination-unreachable" ct state "related" counter name "related" accept
+               meta l4proto { icmp, icmpv6 } ct state new,established accept
+               counter name "unknown" drop
+       }
+}
+EOF
+done
+
+ip netns exec nsclient1 nft -f - <<EOF
+table inet filter {
+       counter unknown { }
+       counter related { }
+       chain input {
+               type filter hook input priority 0; policy accept;
+               meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+               meta l4proto { icmp, icmpv6 } ct state "related" counter name "related" accept
+               counter name "unknown" drop
+       }
+}
+EOF
+
+ip netns exec nsclient2 nft -f - <<EOF
+table inet filter {
+       counter unknown { }
+       counter new { }
+       counter established { }
+
+       chain input {
+               type filter hook input priority 0; policy accept;
+               meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+               meta l4proto { icmp, icmpv6 } ct state "new" counter name "new" accept
+               meta l4proto { icmp, icmpv6 } ct state "established" counter name "established" accept
+               counter name "unknown" drop
+       }
+       chain output {
+               type filter hook output priority 0; policy accept;
+               meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+               meta l4proto { icmp, icmpv6 } ct state "new" counter name "new"
+               meta l4proto { icmp, icmpv6 } ct state "established" counter name "established"
+               counter name "unknown" drop
+       }
+}
+EOF
+
+
+# make sure the NAT core rewrites the address of an icmp error according to
+# the conntrack nat information (the icmp error will be directed at the
+# nsrouter1 address, but it needs to be routed to the nsclient1 address).
+ip netns exec nsrouter1 nft -f - <<EOF
+table ip nat {
+       chain postrouting {
+               type nat hook postrouting priority 0; policy accept;
+               ip protocol icmp oifname "veth0" counter masquerade
+       }
+}
+table ip6 nat {
+       chain postrouting {
+               type nat hook postrouting priority 0; policy accept;
+               ip6 nexthdr icmpv6 oifname "veth0" counter masquerade
+       }
+}
+EOF
+
+ip netns exec nsrouter2 ip link set eth1  mtu 1280
+ip netns exec nsclient2 ip link set veth0 mtu 1280
+sleep 1
+
+ip netns exec nsclient1 ping -c 1 -s 1000 -q -M do 192.168.2.2 >/dev/null
+if [ $? -ne 0 ]; then
+       echo "ERROR: netns ip routing/connectivity broken" 1>&2
+       cleanup
+       exit 1
+fi
+ip netns exec nsclient1 ping6 -q -c 1 -s 1000 dead:2::2 >/dev/null
+if [ $? -ne 0 ]; then
+       echo "ERROR: netns ipv6 routing/connectivity broken" 1>&2
+       cleanup
+       exit 1
+fi
+
+check_unknown
+if [ $? -ne 0 ]; then
+       ret=1
+fi
+
+expect="packets 0 bytes 0"
+for netns in nsrouter1 nsrouter2 nsclient1;do
+       check_counter "$netns" "related" "$expect"
+       if [ $? -ne 0 ]; then
+               ret=1
+       fi
+done
+
+expect="packets 2 bytes 2076"
+check_counter nsclient2 "new" "$expect"
+if [ $? -ne 0 ]; then
+       ret=1
+fi
+
+ip netns exec nsclient1 ping -q -c 1 -s 1300 -M do 192.168.2.2 > /dev/null
+if [ $? -eq 0 ]; then
+       echo "ERROR: ping should have failed with PMTU too big error" 1>&2
+       ret=1
+fi
+
+# nsrouter2 generated the icmp error itself, so its related counter
+# should be 0 (the counter sits in the forward chain).
+expect="packets 0 bytes 0"
+check_counter "nsrouter2" "related" "$expect"
+if [ $? -ne 0 ]; then
+       ret=1
+fi
+
+# but nsrouter1 should have seen it, same for nsclient1.
+expect="packets 1 bytes 576"
+for netns in nsrouter1 nsclient1;do
+       check_counter "$netns" "related" "$expect"
+       if [ $? -ne 0 ]; then
+               ret=1
+       fi
+done
+
+ip netns exec nsclient1 ping6 -c 1 -s 1300 dead:2::2 > /dev/null
+if [ $? -eq 0 ]; then
+       echo "ERROR: ping6 should have failed with PMTU too big error" 1>&2
+       ret=1
+fi
+
+expect="packets 2 bytes 1856"
+for netns in nsrouter1 nsclient1;do
+       check_counter "$netns" "related" "$expect"
+       if [ $? -ne 0 ]; then
+               ret=1
+       fi
+done
+
+if [ $ret -eq 0 ];then
+       echo "PASS: icmp mtu error had RELATED state"
+else
+       echo "ERROR: icmp error RELATED state test has failed"
+fi
+
+cleanup
+exit $ret
index 8ec76681605cca08f8cad14720ada2986d74f76c..3194007cf8d1bf3f456d9e4594417ffe2f9d56fd 100755 (executable)
@@ -321,6 +321,7 @@ EOF
 
 test_masquerade6()
 {
+       local natflags=$1
        local lret=0
 
        ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
@@ -354,13 +355,13 @@ ip netns exec ns0 nft -f - <<EOF
 table ip6 nat {
        chain postrouting {
                type nat hook postrouting priority 0; policy accept;
-               meta oif veth0 masquerade
+               meta oif veth0 masquerade $natflags
        }
 }
 EOF
        ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
        if [ $? -ne 0 ] ; then
-               echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading"
+               echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerade $natflags"
                lret=1
        fi
 
@@ -397,19 +398,26 @@ EOF
                fi
        done
 
+       ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+       if [ $? -ne 0 ] ; then
+               echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerade $natflags (attempt 2)"
+               lret=1
+       fi
+
        ip netns exec ns0 nft flush chain ip6 nat postrouting
        if [ $? -ne 0 ]; then
                echo "ERROR: Could not flush ip6 nat postrouting" 1>&2
                lret=1
        fi
 
-       test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2"
+       test $lret -eq 0 && echo "PASS: IPv6 masquerade $natflags for ns2"
 
        return $lret
 }
 
 test_masquerade()
 {
+       local natflags=$1
        local lret=0
 
        ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
@@ -417,7 +425,7 @@ test_masquerade()
 
        ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
        if [ $? -ne 0 ] ; then
-               echo "ERROR: canot ping ns1 from ns2"
+               echo "ERROR: cannot ping ns1 from ns2 $natflags"
                lret=1
        fi
 
@@ -443,13 +451,13 @@ ip netns exec ns0 nft -f - <<EOF
 table ip nat {
        chain postrouting {
                type nat hook postrouting priority 0; policy accept;
-               meta oif veth0 masquerade
+               meta oif veth0 masquerade $natflags
        }
 }
 EOF
        ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
        if [ $? -ne 0 ] ; then
-               echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading"
+               echo "ERROR: cannot ping ns1 from ns2 with active ip masquerade $natflags"
                lret=1
        fi
 
@@ -485,13 +493,19 @@ EOF
                fi
        done
 
+       ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+       if [ $? -ne 0 ] ; then
+               echo "ERROR: cannot ping ns1 from ns2 with active ip masquerade $natflags (attempt 2)"
+               lret=1
+       fi
+
        ip netns exec ns0 nft flush chain ip nat postrouting
        if [ $? -ne 0 ]; then
                echo "ERROR: Could not flush nat postrouting" 1>&2
                lret=1
        fi
 
-       test $lret -eq 0 && echo "PASS: IP masquerade for ns2"
+       test $lret -eq 0 && echo "PASS: IP masquerade $natflags for ns2"
 
        return $lret
 }
@@ -750,8 +764,12 @@ test_local_dnat
 test_local_dnat6
 
 reset_counters
-test_masquerade
-test_masquerade6
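+# run each masquerade test twice: with default port allocation, then
+# with fully randomized source ports ("fully-random").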
+test_masquerade ""
+test_masquerade6 ""
+
+reset_counters
+test_masquerade "fully-random"
+test_masquerade6 "fully-random"
 
 reset_counters
 test_redirect
index 7202bbac976ea2b718952421b995ab593ce15fca..853aa164a401e054914cccf2a9663111e1c83939 100644 (file)
@@ -187,8 +187,8 @@ static int make_exe(const uint8_t *payload, size_t len)
        ph.p_offset = 0;
        ph.p_vaddr = VADDR;
        ph.p_paddr = 0;
-       ph.p_filesz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + sizeof(payload);
-       ph.p_memsz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + sizeof(payload);
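+       /* sizeof(payload) was the size of the pointer argument, not of
+        * the code blob itself; size the segment with the explicit
+        * length instead. */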
+       ph.p_filesz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + len;
+       ph.p_memsz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + len;
        ph.p_align = 4096;
 
        fd = openat(AT_FDCWD, "/tmp", O_WRONLY|O_EXCL|O_TMPFILE, 0700);
index 762cb01f2ca719da36873484ef3a3489a6f71b3d..47b7473dedef74ccefead246ec19f20159c78958 100644 (file)
@@ -46,12 +46,9 @@ static void fail(const char *fmt, unsigned long a, unsigned long b)
 
 int main(void)
 {
-       const unsigned int PAGE_SIZE = sysconf(_SC_PAGESIZE);
-#ifdef __arm__
-       unsigned long va = 2 * PAGE_SIZE;
-#else
-       unsigned long va = 0;
-#endif
+       const int PAGE_SIZE = sysconf(_SC_PAGESIZE);
+       const unsigned long va_max = 1UL << 32;
+       unsigned long va;
        void *p;
        int fd;
        unsigned long a, b;
@@ -60,10 +57,13 @@ int main(void)
        if (fd == -1)
                return 1;
 
-       p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
-       if (p == MAP_FAILED) {
-               if (errno == EPERM)
-                       return 4;
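+       /*
+        * The lowest mappable address varies (vm.mmap_min_addr, arch
+        * quirks), so probe upwards one page at a time until a
+        * MAP_FIXED mapping succeeds.
+        */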
+       for (va = 0; va < va_max; va += PAGE_SIZE) {
+               p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
+               if (p == (void *)va)
+                       break;
+       }
+       if (va == va_max) {
+               fprintf(stderr, "error: could not map a page at any address below 4GB\n");
                return 1;
        }
 
index 43540f1828cc9b350ab0c6af496d87944f98094c..2deea2169fc2df428ec4420f8800d3e8d7b967b7 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Extract the number of CPUs expected from the specified Kconfig-file
 # fragment by checking CONFIG_SMP and CONFIG_NR_CPUS.  If the specified
@@ -7,23 +8,9 @@
 #
 # Usage: configNR_CPUS.sh config-frag
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
 # Copyright (C) IBM Corporation, 2013
 #
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 
 cf=$1
 if test ! -r $cf
index ef7fcbac3d421ad6d171ab1fed57a5ee2d716221..90016c359e8393e0d1ad54f1dd76ee2b7966c5dd 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
 #
 # config_override.sh base override
 #
@@ -6,23 +7,9 @@
 # that conflict with any in override, concatenating what remains and
 # sending the result to standard output.
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
 # Copyright (C) IBM Corporation, 2017
 #
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 
 base=$1
 if test -r $base
index 197deece7c7c12fb5a407fc0b0daf2a5bf670102..31584cee84d71b61b706c1b75dcb5f9e88c7aa9b 100755 (executable)
@@ -1,23 +1,11 @@
 #!/bin/bash
-# Usage: configcheck.sh .config .config-template
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
+# SPDX-License-Identifier: GPL-2.0+
 #
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
+# Usage: configcheck.sh .config .config-template
 #
 # Copyright (C) IBM Corporation, 2011
 #
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 
 T=${TMPDIR-/tmp}/abat-chk-config.sh.$$
 trap 'rm -rf $T' 0
@@ -26,6 +14,7 @@ mkdir $T
 cat $1 > $T/.config
 
 cat $2 | sed -e 's/\(.*\)=n/# \1 is not set/' -e 's/^#CHECK#//' |
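+# CONFIG_INITRAMFS_SOURCE is generated per run, so never compare it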
+grep -v '^CONFIG_INITRAMFS_SOURCE' |
 awk    '
 {
                print "if grep -q \"" $0 "\" < '"$T/.config"'";
index 65541c21a5444abbad5face676e223c0934de39e..40359486b3a802ca565c4c0506d4bac3b90cbe65 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Usage: configinit.sh config-spec-file build-output-dir results-dir
 #
 # for example, "O=/tmp/foo".  If this argument is omitted, the .config
 # file will be generated directly in the current directory.
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
 # Copyright (C) IBM Corporation, 2013
 #
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 
 T=${TMPDIR-/tmp}/configinit.sh.$$
 trap 'rm -rf $T' 0
index bb99cde3f5f97a216cd85b4654ea0da3cc2b048b..ff7102212703167dc38b84f667425e9e2b19655d 100755 (executable)
@@ -1,26 +1,13 @@
 #!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Get an estimate of how CPU-hoggy to be.
 #
 # Usage: cpus2use.sh
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
 # Copyright (C) IBM Corporation, 2013
 #
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 
 ncpus=`grep '^processor' /proc/cpuinfo | wc -l`
 idlecpus=`mpstat | tail -1 | \
index 65f6655026f0e2dd03fc821c7fbb0925921f00b4..6bcb8b5b2ff223bcb553eb91cf51c09bfc661d74 100644 (file)
@@ -1,24 +1,11 @@
 #!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Shell functions for the rest of the scripts.
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
 # Copyright (C) IBM Corporation, 2013
 #
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 
 # bootparam_hotplug_cpu bootparam-string
 #
index 3633828375e3fb19cb18ce6476d1c26b58c368c7..435b609339854fddfce767c5aa8e93c6fc31e3bb 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Alternate sleeping and spinning on randomly selected CPUs.  The purpose
 # of this script is to inflict random OS jitter on a concurrently running
 # sleepmax: Maximum microseconds to sleep, defaults to one second.
 # spinmax: Maximum microseconds to spin, defaults to one millisecond.
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
 # Copyright (C) IBM Corporation, 2016
 #
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 
 me=$(($1 * 1000))
 duration=$2
index 9115fcdb5617cdcb7f72dbcf23bdd523f32528e6..c27a0bbb9c02e7d71cb80e897437d14748f02828 100755 (executable)
@@ -1,26 +1,13 @@
 #!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Build a kvm-ready Linux kernel from the tree in the current directory.
 #
 # Usage: kvm-build.sh config-template build-dir resdir
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
 # Copyright (C) IBM Corporation, 2011
 #
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 
 config_template=${1}
 if test -z "$config_template" -o ! -f "$config_template" -o ! -r "$config_template"
index 98f650c9bf54ab7972f94787f901ae948e17f631..8426fe1f15eeb1a3bce6a4f11687a5aa28ba0321 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Invoke a text editor on all console.log files for all runs with diagnostics,
 # that is, on all such files having a console.log.diags counterpart.
 #
 # The "directory" above should end with the date/time directory, for example,
 # "tools/testing/selftests/rcutorture/res/2018.02.25-14:27:27".
+#
+# Copyright (C) IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.ibm.com>
 
 rundir="${1}"
 if test -z "$rundir" -o ! -d "$rundir"
index 2de92f43ee8c2f819978b3a8ce4ccbb2ec4345fc..f3a7a5e2b89d49a0752a0714f64714f7eaf50b6d 100755 (executable)
@@ -1,26 +1,13 @@
 #!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Analyze a given results directory for locktorture progress.
 #
 # Usage: kvm-recheck-lock.sh resdir
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
 # Copyright (C) IBM Corporation, 2014
 #
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 
 i="$1"
 if test -d "$i" -a -r "$i"
index 0fa8a61ccb7b254baa29ea8fdf30b0dd28da2246..2a7f3f4756a740a67a48d60e74f58ef0c4fc70ca 100755 (executable)
@@ -1,26 +1,13 @@
 #!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Analyze a given results directory for rcutorture progress.
 #
 # Usage: kvm-recheck-rcu.sh resdir
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
 # Copyright (C) IBM Corporation, 2014
 #
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 
 i="$1"
 if test -d "$i" -a -r "$i"
index 8948f7926b21f14defd13cbe0e050e7b3a6a7c00..7d3c2be66c64484371f6fc0f0b9c4b6354cbe911 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Analyze a given results directory for rcuperf performance measurements,
 # looking for ftrace data.  Exits with 0 if data was found, analyzed, and
@@ -7,23 +8,9 @@
 #
 # Usage: kvm-recheck-rcuperf-ftrace.sh resdir
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
 # Copyright (C) IBM Corporation, 2016
 #
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 
 i="$1"
 . functions.sh
index ccebf772fa1e57b49894dc8768abae67849c73e6..db0375a57f281b91f9b53525d22e1303f489d79a 100755 (executable)
@@ -1,26 +1,13 @@
 #!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Analyze a given results directory for rcuperf performance measurements.
 #
 # Usage: kvm-recheck-rcuperf.sh resdir
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
 # Copyright (C) IBM Corporation, 2016
 #
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 
 i="$1"
 if test -d "$i" -a -r "$i"
index c9bab57a77ebafe98d01809f19508c7d589ccf97..2adde6aaafdbadb722d84bca43b9579d0d638e9a 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Given the results directories for previous KVM-based torture runs,
 # check the build and console output for errors.  Given a directory
@@ -6,23 +7,9 @@
 #
 # Usage: kvm-recheck.sh resdir ...
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
 # Copyright (C) IBM Corporation, 2011
 #
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 
 PATH=`pwd`/tools/testing/selftests/rcutorture/bin:$PATH; export PATH
 . functions.sh
index 58ca758a5786f10fede4c099a098758202f7a906..0eb1ec16d78a1e2863bf1408f8b591d93ec7a47c 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Run a kvm-based test of the specified tree on the specified configs.
 # Fully automated run and error checking, no graphics console.
 #
 # More sophisticated argument parsing is clearly needed.
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
 # Copyright (C) IBM Corporation, 2011
 #
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 
 T=${TMPDIR-/tmp}/kvm-test-1-run.sh.$$
 trap 'rm -rf $T' 0
index 19864f1cb27a4299f03d840406d97489df13a98f..8f1e337b9b54e193ee701fe19f54175178632bdf 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Run a series of tests under KVM.  By default, this series is specified
 # by the relevant CFLIST file, but can be overridden by the --configs
@@ -6,23 +7,9 @@
 #
 # Usage: kvm.sh [ options ]
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
 # Copyright (C) IBM Corporation, 2011
 #
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 
 scriptname=$0
 args="$*"
index 83552bb007b4289590c64d91ad2a05bdd1753e4c..6fa9bd1ddc0940a357a8f8c6dc4f9c8c73ac0a77 100755 (executable)
@@ -1,21 +1,8 @@
 #!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Create an initrd directory if one does not already exist.
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
 # Copyright (C) IBM Corporation, 2013
 #
 # Author: Connor Shu <Connor.Shu@ibm.com>
index 24fe5f822b28d9ddcd9ddebe9df4d0bdfebde704..0701b3bf6adea122e2792a23b107d4f0f44e5ae8 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Check the build output from an rcutorture run for goodness.
 # The "file" is a pathname on the local system, and "title" is
@@ -8,23 +9,9 @@
 #
 # Usage: parse-build.sh file title
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
 # Copyright (C) IBM Corporation, 2011
 #
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 
 F=$1
 title=$2
index 84933f6aed77818d7e368f171ad544141578493f..4508373a922fe2b962c076b37ac93a8a878f8a7d 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Check the console output from an rcutorture run for oopses.
 # The "file" is a pathname on the local system, and "title" is
@@ -6,23 +7,9 @@
 #
 # Usage: parse-console.sh file title
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
 # Copyright (C) IBM Corporation, 2011
 #
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 
 T=${TMPDIR-/tmp}/parse-console.sh.$$
 file="$1"
index 80eb646e13199c84ce943986263798138b9c88e0..d3e4b2971f9221129f4a7c3f4fed6b9d897358c0 100644 (file)
@@ -1,24 +1,11 @@
 #!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Kernel-version-dependent shell functions for the rest of the scripts.
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
 # Copyright (C) IBM Corporation, 2014
 #
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 
 # locktorture_param_onoff bootparam-string config-file
 #
index 7bab8246392bb21f982e3803f77860717513a084..effa415f9b9282880083d83a2ecadf996711b331 100644 (file)
@@ -1,24 +1,11 @@
 #!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Kernel-version-dependent shell functions for the rest of the scripts.
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
 # Copyright (C) IBM Corporation, 2013
 #
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 
 # rcutorture_param_n_barrier_cbs bootparam-string
 #
index d36b8fd6f0fc996312167c04445746d121cfe371..777d5b0c190fbeaa46c30b382a2bbf55d28407b3 100644 (file)
@@ -1,24 +1,11 @@
 #!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Torture-suite-dependent shell functions for the rest of the scripts.
 #
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
 # Copyright (C) IBM Corporation, 2015
 #
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 
 # per_version_boot_params bootparam-string config-file seconds
 #
index f69d2ee29742808600d406c47f283d743b0aa7c8..5019cdae5d0b8ca8a47692c71956d9f824812466 100644 (file)
@@ -2166,11 +2166,14 @@ TEST(detect_seccomp_filter_flags)
                                 SECCOMP_FILTER_FLAG_LOG,
                                 SECCOMP_FILTER_FLAG_SPEC_ALLOW,
                                 SECCOMP_FILTER_FLAG_NEW_LISTENER };
-       unsigned int flag, all_flags;
+       unsigned int exclusive[] = {
+                               SECCOMP_FILTER_FLAG_TSYNC,
+                               SECCOMP_FILTER_FLAG_NEW_LISTENER };
+       unsigned int flag, all_flags, exclusive_mask;
        int i;
        long ret;
 
-       /* Test detection of known-good filter flags */
+       /* Test detection of individual known-good filter flags */
        for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
                int bits = 0;
 
@@ -2197,16 +2200,29 @@ TEST(detect_seccomp_filter_flags)
                all_flags |= flag;
        }
 
-       /* Test detection of all known-good filter flags */
-       ret = seccomp(SECCOMP_SET_MODE_FILTER, all_flags, NULL);
-       EXPECT_EQ(-1, ret);
-       EXPECT_EQ(EFAULT, errno) {
-               TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
-                      all_flags);
+       /*
+        * Test detection of all known-good filter flags combined. But
+        * for the exclusive flags we need to mask them out and try them
+        * individually for the "all flags" testing.
+        */
+       exclusive_mask = 0;
+       for (i = 0; i < ARRAY_SIZE(exclusive); i++)
+               exclusive_mask |= exclusive[i];
+       for (i = 0; i < ARRAY_SIZE(exclusive); i++) {
+               flag = all_flags & ~exclusive_mask;
+               flag |= exclusive[i];
+
+               ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
+               EXPECT_EQ(-1, ret);
+               EXPECT_EQ(EFAULT, errno) {
+                       TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
+                              flag);
+               }
        }
 
-       /* Test detection of an unknown filter flag */
+       /* Test detection of unknown filter flags, without exclusives. */
        flag = -1;
+       flag &= ~exclusive_mask;
        ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
        EXPECT_EQ(-1, ret);
        EXPECT_EQ(EINVAL, errno) {
index 5970cee6d05f26fd9be36a5b1385d62083ab3cfe..b074ea9b6fe864b25720729ff1a8c18ef03dedfd 100644 (file)
         "teardown": [
             "$TC action flush action bpf"
         ]
+    },
+    {
+        "id": "b8a1",
+        "name": "Replace bpf action with invalid goto_chain control",
+        "category": [
+            "actions",
+            "bpf"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action bpf",
+                0,
+                1,
+                255
+            ],
+            "$TC action add action bpf bytecode '1,6 0 0 4294967295' pass index 90"
+        ],
+        "cmdUnderTest": "$TC action replace action bpf bytecode '1,6 0 0 4294967295' goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC action list action bpf",
+        "matchPattern": "action order [0-9]*: bpf.* default-action pass.*index 90",
+        "matchCount": "1",
+        "teardown": [
+            "$TC action flush action bpf"
+        ]
     }
 ]
index 13147a1f5731444abc28bd53950b3702c71835a5..cadde8f41fcd3db8cdaa21afd117cda720802b05 100644 (file)
         "teardown": [
             "$TC actions flush action connmark"
         ]
+    },
+    {
+        "id": "c506",
+        "name": "Replace connmark with invalid goto chain control",
+        "category": [
+            "actions",
+            "connmark"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action connmark",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action connmark pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action connmark goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action connmark index 90",
+        "matchPattern": "action order [0-9]+: connmark zone 0 pass.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action connmark"
+        ]
     }
 ]
index a022792d392a9c93bf4c33d049ea7c817b85eded..ddabb2fbb7c72b4fa49787636c0b3502fb807aeb 100644 (file)
         "matchPattern": "^[ \t]+index [0-9]+ ref",
         "matchCount": "0",
         "teardown": []
+    },
+    {
+        "id": "d128",
+        "name": "Replace csum action with invalid goto chain control",
+        "category": [
+            "actions",
+            "csum"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action csum",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action csum iph index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action csum iph goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action csum index 90",
+        "matchPattern": "action order [0-9]*: csum \\(iph\\) action pass.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action csum"
+        ]
     }
 ]
index 89189a03ce3d431b817fcbdaef4eb3a89219fcf9..814b7a8a478be8c9283c8d9426b829d00b9350a4 100644 (file)
         "teardown": [
             "$TC actions flush action gact"
         ]
+    },
+    {
+        "id": "ca89",
+        "name": "Replace gact action with invalid goto chain control",
+        "category": [
+            "actions",
+            "gact"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action gact",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action pass random determ drop 2 index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action goto chain 42 random determ drop 5 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions list action gact",
+        "matchPattern": "action order [0-9]*: gact action pass.*random type determ drop val 2.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action gact"
+        ]
     }
 ]
index 0da3545cabdb6239190c8e916d28e25a882b78d2..c13a68b98fc775086d2087205ccefeb5304a4e7a 100644 (file)
         "matchPattern": "action order [0-9]*: ife encode action pipe.*allow prio.*index 4",
         "matchCount": "0",
         "teardown": []
+    },
+    {
+        "id": "a0e2",
+        "name": "Replace ife encode action with invalid goto chain control",
+        "category": [
+            "actions",
+            "ife"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action ife",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action ife encode allow mark pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action ife encode allow mark goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action ife index 90",
+        "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E .*allow mark.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action ife"
+        ]
     }
 ]
index db49fd0f84459fdfa844bfea2fceb8723ad7b1d1..6e5fb3d256811a9c606505072d2cd1e2db085088 100644 (file)
         "teardown": [
             "$TC actions flush action mirred"
         ]
+    },
+    {
+        "id": "2a9a",
+        "name": "Replace mirred action with invalid goto chain control",
+        "category": [
+            "actions",
+            "mirred"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action mirred",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action mirred ingress mirror dev lo drop index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action mirred ingress mirror dev lo goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action mirred index 90",
+        "matchPattern": "action order [0-9]*: mirred \\(Ingress Mirror to device lo\\) drop.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action mirred"
+        ]
     }
 ]
index 0080dc2fd41c4542ac21f5f655a80c4c6225644a..bc12c1ccad30e9660c5ab19abfc8064aece57f05 100644 (file)
         "teardown": [
             "$TC actions flush action nat"
         ]
+    },
+    {
+        "id": "4b12",
+        "name": "Replace nat action with invalid goto chain control",
+        "category": [
+            "actions",
+            "nat"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action nat",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action nat ingress 1.18.1.1 1.18.2.2 drop index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action nat ingress 1.18.1.1 1.18.2.2 goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action nat index 90",
+        "matchPattern": "action order [0-9]+:  nat ingress 1.18.1.1/32 1.18.2.2 drop.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action nat"
+        ]
     }
 ]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json
new file mode 100644 (file)
index 0000000..b73ceb9
--- /dev/null
@@ -0,0 +1,51 @@
+[
+    {
+        "id": "319a",
+        "name": "Add pedit action that mangles IP TTL",
+        "category": [
+            "actions",
+            "pedit"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action pedit",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action pedit ex munge ip ttl set 10",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions ls action pedit",
+        "matchPattern": "action order [0-9]+:  pedit action pass keys 1.*index 1 ref.*key #0  at ipv4\\+8: val 0a000000 mask 00ffffff",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action pedit"
+        ]
+    },
+    {
+        "id": "7e67",
+        "name": "Replace pedit action with invalid goto chain",
+        "category": [
+            "actions",
+            "pedit"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action pedit",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action pedit ex munge ip ttl set 10 pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action pedit ex munge ip ttl set 10 goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions ls action pedit",
+        "matchPattern": "action order [0-9]+:  pedit action pass keys 1.*index 90 ref.*key #0  at ipv4\\+8: val 0a000000 mask 00ffffff",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action pedit"
+        ]
+    }
+]
index 4086a50a670ecba9cc46cb9872061e9151a24e42..b8268da5adaaa77a1cf4a3a7092b6beb4fffeced 100644 (file)
         "teardown": [
             "$TC actions flush action police"
         ]
+    },
+    {
+        "id": "689e",
+        "name": "Replace police action with invalid goto chain control",
+        "category": [
+            "actions",
+            "police"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action police",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action police rate 3mbit burst 250k drop index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action police rate 3mbit burst 250k goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action police index 90",
+        "matchPattern": "action order [0-9]*:  police 0x5a rate 3Mbit burst 250Kb mtu 2Kb action drop",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action police"
+        ]
     }
 ]
index 3aca33c00039615eb4687fbabf64c75b7e6bb388..ddabb160a11bacc151b9891f14f7da62846b8ed1 100644 (file)
             "$TC actions flush action sample"
         ]
     },
+    {
+        "id": "7571",
+        "name": "Add sample action with invalid rate",
+        "category": [
+            "actions",
+            "sample"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action sample",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action sample rate 0 group 1 index 2",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action sample index 2",
+        "matchPattern": "action order [0-9]+: sample rate 1/0 group 1.*index 2 ref",
+        "matchCount": "0",
+        "teardown": [
+            "$TC actions flush action sample"
+        ]
+    },
     {
         "id": "b6d4",
         "name": "Add sample action with mandatory arguments and invalid control action",
         "teardown": [
             "$TC actions flush action sample"
         ]
+    },
+    {
+        "id": "0a6e",
+        "name": "Replace sample action with invalid goto chain control",
+        "category": [
+            "actions",
+            "sample"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action sample",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action sample rate 1024 group 4 pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action sample rate 1024 group 7 goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions list action sample",
+        "matchPattern": "action order [0-9]+: sample rate 1/1024 group 4 pass.*index 90",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action sample"
+        ]
     }
 ]
index e89a7aa4012d1664ef5a3f1d7263aaf70b3bf785..8e8c1ae12260877fea635022a4d3a194a8f3c65c 100644 (file)
         "teardown": [
             ""
         ]
+    },
+    {
+        "id": "b776",
+        "name": "Replace simple action with invalid goto chain control",
+        "category": [
+            "actions",
+            "simple"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action simple",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action simple sdata \"hello\" pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action simple sdata \"world\" goto chain 42 index  90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions list action simple",
+        "matchPattern": "action order [0-9]*: Simple <hello>.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action simple"
+        ]
     }
 ]
index 5aaf593b914a3646d8e62922b6dba210724fa8d9..ecd96eda7f6a1044996b2afaa37e325008acfa0e 100644 (file)
         "teardown": [
             "$TC actions flush action skbedit"
         ]
+    },
+    {
+        "id": "1b2b",
+        "name": "Replace skbedit action with invalid goto_chain control",
+        "category": [
+            "actions",
+            "skbedit"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action skbedit",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action skbedit ptype host pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action skbedit ptype host goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions list action skbedit",
+        "matchPattern": "action order [0-9]*: skbedit  ptype host pass.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action skbedit"
+        ]
     }
 ]
index fe3326e939c1b11bc008b46f452c336218906460..6eb4c4f97060fd3116a77537d8566f9e66d14060 100644 (file)
         "teardown": [
             "$TC actions flush action skbmod"
         ]
+    },
+    {
+        "id": "b651",
+        "name": "Replace skbmod action with invalid goto_chain control",
+        "category": [
+            "actions",
+            "skbmod"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action skbmod",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action skbmod set etype 0x1111 pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action skbmod set etype 0x1111 goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions ls action skbmod",
+        "matchPattern": "action order [0-9]*: skbmod pass set etype 0x1111\\s+index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action skbmod"
+        ]
     }
 ]
index e7e15a7336b6dfd1516e0276343f4afa735b41dd..28453a445fdb7e0af074dd26daaff90e8d8a120b 100644 (file)
         "teardown": [
            "$TC actions flush action tunnel_key"
        ]
+    },
+    {
+        "id": "8242",
+        "name": "Replace tunnel_key set action with invalid goto chain",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 10.10.10.2 dst_ip 20.20.20.1 dst_port 3129 id 2 csum goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action tunnel_key index 90",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1.*dst_port 3128.*csum pass.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
     }
 ]
index 69ea09eefffc27290b5c16b084f8ff3798c184e8..cc7c7d75800809115bc22d62dbcd4c646a7f35d3 100644 (file)
         "teardown": [
             "$TC actions flush action vlan"
         ]
+    },
+    {
+        "id": "e394",
+        "name": "Replace vlan push action with invalid goto chain control",
+        "category": [
+            "actions",
+            "vlan"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action vlan",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action vlan push id 500 pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action vlan push id 500 goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action vlan index 90",
+        "matchPattern": "action order [0-9]+: vlan.*push id 500 protocol 802.1Q priority 0 pass.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action vlan"
+        ]
     }
 ]
index 99a5ffca1088ad52cdac7842739eaefc40f99c07..2d096b2abf2c943064ae86e674d2a7a65f49757d 100644 (file)
             "$TC qdisc del dev $DEV1 ingress"
         ]
     },
+    {
+        "id": "2638",
+        "name": "Add matchall and try to get it",
+        "category": [
+            "filter",
+            "matchall"
+        ],
+        "setup": [
+            "$TC qdisc add dev $DEV1 clsact",
+            "$TC filter add dev $DEV1 protocol all pref 1 ingress handle 0x1234 matchall action ok"
+        ],
+        "cmdUnderTest": "$TC filter get dev $DEV1 protocol all pref 1 ingress handle 0x1234 matchall",
+        "expExitCode": "0",
+        "verifyCmd": "$TC filter show dev $DEV1 ingress",
+        "matchPattern": "filter protocol all pref 1 matchall chain 0 handle 0x1234",
+        "matchCount": "1",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 clsact"
+        ]
+    },
     {
         "id": "d052",
         "name": "Add 1M filters with the same action",
index 40ea95ce2eadacf07f0dd8b197f94e91a9138b64..828c185846248031ff598670d393e34a38fa24df 100644 (file)
@@ -22,6 +22,7 @@ TPM2_CC_UNSEAL = 0x015E
 TPM2_CC_FLUSH_CONTEXT = 0x0165
 TPM2_CC_START_AUTH_SESSION = 0x0176
 TPM2_CC_GET_CAPABILITY = 0x017A
+TPM2_CC_GET_RANDOM = 0x017B
 TPM2_CC_PCR_READ = 0x017E
 TPM2_CC_POLICY_PCR = 0x017F
 TPM2_CC_PCR_EXTEND = 0x0182
@@ -357,9 +358,9 @@ class Client:
         self.flags = flags
 
         if (self.flags & Client.FLAG_SPACE) == 0:
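+            # open unbuffered so each read()/write() maps 1:1 onto a
+            # TPM device transaction and short reads are not merged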
-            self.tpm = open('/dev/tpm0', 'r+b')
+            self.tpm = open('/dev/tpm0', 'r+b', buffering=0)
         else:
-            self.tpm = open('/dev/tpmrm0', 'r+b')
+            self.tpm = open('/dev/tpmrm0', 'r+b', buffering=0)
 
     def close(self):
         self.tpm.close()
index 3bb066fea4a01171f31f80df653c3a046c35acdd..d4973be53493226b19dcca5e7140e997ea964e8b 100644 (file)
@@ -158,6 +158,69 @@ class SmokeTest(unittest.TestCase):
             pass
         self.assertEqual(rejected, True)
 
+    def test_read_partial_resp(self):
+        try:
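+            # TPM2_GetRandom command: tag, total size, command code,
+            # number of random bytes requested (0x20)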
+            fmt = '>HIIH'
+            cmd = struct.pack(fmt,
+                              tpm2.TPM2_ST_NO_SESSIONS,
+                              struct.calcsize(fmt),
+                              tpm2.TPM2_CC_GET_RANDOM,
+                              0x20)
+            self.client.tpm.write(cmd)
+            hdr = self.client.tpm.read(10)
+            sz = struct.unpack('>I', hdr[2:6])[0]
+            rsp = self.client.tpm.read()
+        except:
+            pass
+        self.assertEqual(sz, 10 + 2 + 32)
+        self.assertEqual(len(rsp), 2 + 32)
+
+    def test_read_partial_overwrite(self):
+        try:
+            fmt = '>HIIH'
+            cmd = struct.pack(fmt,
+                              tpm2.TPM2_ST_NO_SESSIONS,
+                              struct.calcsize(fmt),
+                              tpm2.TPM2_CC_GET_RANDOM,
+                              0x20)
+            self.client.tpm.write(cmd)
+            # Read part of the response
+            rsp1 = self.client.tpm.read(15)
+
+            # Send a new cmd
+            self.client.tpm.write(cmd)
+
+            # Read the whole response
+            rsp2 = self.client.tpm.read()
+        except:
+            pass
+        self.assertEqual(len(rsp1), 15)
+        self.assertEqual(len(rsp2), 10 + 2 + 32)
+
+    def test_send_two_cmds(self):
+        rejected = False
+        try:
+            fmt = '>HIIH'
+            cmd = struct.pack(fmt,
+                              tpm2.TPM2_ST_NO_SESSIONS,
+                              struct.calcsize(fmt),
+                              tpm2.TPM2_CC_GET_RANDOM,
+                              0x20)
+            self.client.tpm.write(cmd)
+
+            # expect the second write to be rejected with -EBUSY
+            self.client.tpm.write(cmd)
+            rsp = self.client.tpm.read()
+
+        except IOError as e:
+            # read the response
+            rsp = self.client.tpm.read()
+            rejected = True
+            pass
+        except:
+            pass
+        self.assertEqual(rejected, True)
+
 class SpaceTest(unittest.TestCase):
     def setUp(self):
         logging.basicConfig(filename='SpaceTest.log', level=logging.DEBUG)
index 3417f2dbc3667c372d4ed838a9f95f3826555650..7fc272ecae1621e0306a8c00065d670ad7694ce6 100644 (file)
@@ -507,6 +507,14 @@ static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
+       /*
+        * Update the timer output so that it is likely to match the
+        * state we're about to restore. If the timer expires between
+        * this point and the register restoration, we'll take the
+        * interrupt anyway.
+        */
+       kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);
+
        /*
         * When using a userspace irqchip with the architected timers and a
         * host interrupt controller that doesn't support an active state, we
@@ -730,7 +738,6 @@ static void kvm_timer_init_interrupt(void *info)
 int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
 {
        struct arch_timer_context *timer;
-       bool level;
 
        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
@@ -758,10 +765,6 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
                return -1;
        }
 
-       level = kvm_timer_should_fire(timer);
-       kvm_timer_update_irq(vcpu, level, timer);
-       timer_emulate(timer);
-
        return 0;
 }
 
@@ -812,7 +815,7 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
 
        switch (treg) {
        case TIMER_REG_TVAL:
-               val = kvm_phys_timer_read() - timer->cntvoff - timer->cnt_cval;
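+               /* TVAL = CVAL - guest counter (CNTPCT - CNTVOFF) */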
+               val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
                break;
 
        case TIMER_REG_CTL:
@@ -858,7 +861,7 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
 {
        switch (treg) {
        case TIMER_REG_TVAL:
-               timer->cnt_cval = val - kvm_phys_timer_read() - timer->cntvoff;
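+               /* CVAL = guest counter (CNTPCT - CNTVOFF) + TVAL */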
+               timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + val;
                break;
 
        case TIMER_REG_CTL:
index 99c37384ba7bd3766db4fb863d6baea908210c75..f412ebc906100e4b5df5e4d30ef1ab9cc68d1c07 100644 (file)
@@ -934,7 +934,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
 static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
                               const struct kvm_vcpu_init *init)
 {
-       unsigned int i;
+       unsigned int i, ret;
        int phys_target = kvm_target_cpu();
 
        if (init->target != phys_target)
@@ -969,9 +969,14 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
        vcpu->arch.target = phys_target;
 
        /* Now we know what it is, we can reset it. */
-       return kvm_reset_vcpu(vcpu);
-}
+       ret = kvm_reset_vcpu(vcpu);
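+       /* On failure, return the vcpu to the uninitialized state so a
+        * later KVM_ARM_VCPU_INIT can retry from scratch. */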
+       if (ret) {
+               vcpu->arch.target = -1;
+               bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
+       }
 
+       return ret;
+}
 
 static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
                                         struct kvm_vcpu_init *init)
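With the rollback above, a failed KVM_ARM_VCPU_INIT no longer leaves vcpu->arch.target set, so the vCPU stays cleanly uninitialized and userspace can retry the ioctl with corrected parameters. A hedged userspace sketch (vcpu_fd is an assumed open vCPU descriptor, and the SVE bit is only an example of a feature the host may reject):

    #include <err.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    struct kvm_vcpu_init init = {
            .target = KVM_ARM_TARGET_GENERIC_V8,
    };

    init.features[0] = 1UL << KVM_ARM_VCPU_SVE;     /* host may reject this */
    if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init) < 0) {
            init.features[0] = 0;                   /* drop the feature, retry */
            if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init) < 0)
                    err(1, "KVM_ARM_VCPU_INIT");
    }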
index 264d92da32403810ea316912f9cba05eea040215..370bd6c5e6cb3e0e2a88bd985c38fd77a6277812 100644 (file)
@@ -222,7 +222,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
                }
        }
 
-       if (used_lrs) {
+       if (used_lrs || cpu_if->its_vpe.its_vm) {
                int i;
                u32 elrsr;
 
@@ -247,7 +247,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
        int i;
 
-       if (used_lrs) {
+       if (used_lrs || cpu_if->its_vpe.its_vm) {
                write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
 
                for (i = 0; i < used_lrs; i++)
index ffd7acdceac7397d126bfb70f010981c1489ae6c..a39dcfdbcc6527afc0ebc911cf0262238a2c131d 100644 (file)
@@ -102,8 +102,7 @@ static bool kvm_is_device_pfn(unsigned long pfn)
  * @addr:      IPA
  * @pmd:       pmd pointer for IPA
  *
- * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs. Marks all
- * pages in the range dirty.
+ * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs.
  */
 static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
 {
@@ -121,8 +120,7 @@ static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
  * @addr:      IPA
  * @pud:       pud pointer for IPA
  *
- * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs. Marks all
- * pages in the range dirty.
+ * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs.
  */
 static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
 {
@@ -899,9 +897,8 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
  * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
  * @kvm:       The KVM struct pointer for the VM.
  *
- * Allocates only the stage-2 HW PGD level table(s) (can support either full
- * 40-bit input addresses or limited to 32-bit input addresses). Clears the
- * allocated pages.
+ * Allocates only the stage-2 HW PGD level table(s) of size defined by
+ * stage2_pgd_size(kvm).
  *
  * Note we don't need locking here as this is only called when the VM is
  * created, which can only be done once.
@@ -1067,25 +1064,43 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
 {
        pmd_t *pmd, old_pmd;
 
+retry:
        pmd = stage2_get_pmd(kvm, cache, addr);
        VM_BUG_ON(!pmd);
 
        old_pmd = *pmd;
+       /*
+        * Multiple vcpus faulting on the same PMD entry can
+        * lead to them sequentially updating the PMD with the
+        * same value. Following the break-before-make
+        * (pmd_clear() followed by tlb_flush()) process can
+        * hinder forward progress due to refaults generated
+        * on missing translations.
+        *
+        * Skip updating the page table if the entry is
+        * unchanged.
+        */
+       if (pmd_val(old_pmd) == pmd_val(*new_pmd))
+               return 0;
+
        if (pmd_present(old_pmd)) {
                /*
-                * Multiple vcpus faulting on the same PMD entry, can
-                * lead to them sequentially updating the PMD with the
-                * same value. Following the break-before-make
-                * (pmd_clear() followed by tlb_flush()) process can
-                * hinder forward progress due to refaults generated
-                * on missing translations.
+                * If we already have a PTE level mapping for this block,
+                * we must unmap it to avoid inconsistent TLB state and
+                * leaking the table page. We could end up in this situation
+                * if the memory slot was marked for dirty logging and was
+                * reverted, leaving PTE level mappings for the pages accessed
+                * during the period. So, unmap the PTE level mapping for this
+                * block and retry, as we could have released the upper level
+                * table in the process.
                 *
-                * Skip updating the page table if the entry is
-                * unchanged.
+                * Normal THP split/merge follows mmu_notifier callbacks and gets
+                * handled accordingly.
                 */
-               if (pmd_val(old_pmd) == pmd_val(*new_pmd))
-                       return 0;
-
+               if (!pmd_thp_or_huge(old_pmd)) {
+                       unmap_stage2_range(kvm, addr & S2_PMD_MASK, S2_PMD_SIZE);
+                       goto retry;
+               }
                /*
                 * Mapping in huge pages should only happen through a
                 * fault.  If a page is merged into a transparent huge
@@ -1097,8 +1112,7 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
                 * should become splitting first, unmapped, merged,
                 * and mapped back in on-demand.
                 */
-               VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
-
+               WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
                pmd_clear(pmd);
                kvm_tlb_flush_vmid_ipa(kvm, addr);
        } else {
@@ -1114,6 +1128,7 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac
 {
        pud_t *pudp, old_pud;
 
+retry:
        pudp = stage2_get_pud(kvm, cache, addr);
        VM_BUG_ON(!pudp);
 
@@ -1121,14 +1136,23 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac
 
        /*
        * A large number of vcpus faulting on the same stage 2 entry
-        * can lead to a refault due to the
-        * stage2_pud_clear()/tlb_flush(). Skip updating the page
-        * tables if there is no change.
+        * can lead to a refault due to the stage2_pud_clear()/tlb_flush().
+        * Skip updating the page tables if there is no change.
         */
        if (pud_val(old_pud) == pud_val(*new_pudp))
                return 0;
 
        if (stage2_pud_present(kvm, old_pud)) {
+               /*
+                * If we already have table level mapping for this block, unmap
+                * the range for this block and retry.
+                */
+               if (!stage2_pud_huge(kvm, old_pud)) {
+                       unmap_stage2_range(kvm, addr & S2_PUD_MASK, S2_PUD_SIZE);
+                       goto retry;
+               }
+
+               WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
                stage2_pud_clear(kvm, pudp);
                kvm_tlb_flush_vmid_ipa(kvm, addr);
        } else {
@@ -1451,13 +1475,11 @@ static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
 }
 
 /**
-  * stage2_wp_puds - write protect PGD range
-  * @pgd:      pointer to pgd entry
-  * @addr:     range start address
-  * @end:      range end address
-  *
-  * Process PUD entries, for a huge PUD we cause a panic.
-  */
+ * stage2_wp_puds - write protect PGD range
+ * @pgd:       pointer to pgd entry
+ * @addr:      range start address
+ * @end:       range end address
+ */
 static void  stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
                            phys_addr_t addr, phys_addr_t end)
 {
@@ -1594,8 +1616,9 @@ static void kvm_send_hwpoison_signal(unsigned long address,
        send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
 }
 
-static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
-                                              unsigned long hva)
+static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
+                                              unsigned long hva,
+                                              unsigned long map_size)
 {
        gpa_t gpa_start;
        hva_t uaddr_start, uaddr_end;
@@ -1610,34 +1633,34 @@ static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
 
        /*
         * Pages belonging to memslots that don't have the same alignment
-        * within a PMD for userspace and IPA cannot be mapped with stage-2
-        * PMD entries, because we'll end up mapping the wrong pages.
+        * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
+        * PMD/PUD entries, because we'll end up mapping the wrong pages.
         *
         * Consider a layout like the following:
         *
         *    memslot->userspace_addr:
         *    +-----+--------------------+--------------------+---+
-        *    |abcde|fgh  Stage-1 PMD    |    Stage-1 PMD   tv|xyz|
+        *    |abcde|fgh  Stage-1 block  |    Stage-1 block tv|xyz|
         *    +-----+--------------------+--------------------+---+
         *
         *    memslot->base_gfn << PAGE_SIZE:
         *      +---+--------------------+--------------------+-----+
-        *      |abc|def  Stage-2 PMD    |    Stage-2 PMD     |tvxyz|
+        *      |abc|def  Stage-2 block  |    Stage-2 block   |tvxyz|
         *      +---+--------------------+--------------------+-----+
         *
-        * If we create those stage-2 PMDs, we'll end up with this incorrect
+        * If we create those stage-2 blocks, we'll end up with this incorrect
         * mapping:
         *   d -> f
         *   e -> g
         *   f -> h
         */
-       if ((gpa_start & ~S2_PMD_MASK) != (uaddr_start & ~S2_PMD_MASK))
+       if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
                return false;
 
        /*
         * Next, let's make sure we're not trying to map anything not covered
-        * by the memslot. This means we have to prohibit PMD size mappings
-        * for the beginning and end of a non-PMD aligned and non-PMD sized
+        * by the memslot. This means we have to prohibit block size mappings
+        * for the beginning and end of a non-block aligned and non-block sized
         * memory slot (illustrated by the head and tail parts of the
         * userspace view above containing pages 'abcde' and 'xyz',
         * respectively).
@@ -1646,8 +1669,8 @@ static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
         * userspace_addr or the base_gfn, as both are equally aligned (per
         * the check above) and equally sized.
         */
-       return (hva & S2_PMD_MASK) >= uaddr_start &&
-              (hva & S2_PMD_MASK) + S2_PMD_SIZE <= uaddr_end;
+       return (hva & ~(map_size - 1)) >= uaddr_start &&
+              (hva & ~(map_size - 1)) + map_size <= uaddr_end;
 }
 
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
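Concretely, with map_size = 2 MiB the generalized check refuses a block mapping whenever the IPA and the userspace VA disagree about their offset within a 2 MiB block. Illustrative values (not from the patch):

    #define SZ_2M   0x200000UL

    gpa_t gpa_start   = 0x80000000;     /* slot IPA, 2M-aligned     */
    hva_t uaddr_start = 0x40100000;     /* slot VA, only 1M-aligned */

    /* gpa_start   & (SZ_2M - 1) == 0x000000                        */
    /* uaddr_start & (SZ_2M - 1) == 0x100000 -> mismatch: use PTEs  */

Parameterizing on map_size lets the same helper police PMD-sized (2 MiB) and PUD-sized (1 GiB with 4K pages) mappings, where the old code hard-wired S2_PMD_MASK.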
@@ -1676,12 +1699,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                return -EFAULT;
        }
 
-       if (!fault_supports_stage2_pmd_mappings(memslot, hva))
-               force_pte = true;
-
-       if (logging_active)
-               force_pte = true;
-
        /* Let's check if we will get back a huge page backed by hugetlbfs */
        down_read(&current->mm->mmap_sem);
        vma = find_vma_intersection(current->mm, hva, hva + 1);
@@ -1692,6 +1709,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        }
 
        vma_pagesize = vma_kernel_pagesize(vma);
+       if (logging_active ||
+           !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
+               force_pte = true;
+               vma_pagesize = PAGE_SIZE;
+       }
+
        /*
         * The stage2 has a minimum of 2 level table (For arm64 see
         * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
@@ -1699,11 +1722,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * As for PUD huge maps, we must make sure that we have at least
         * 3 levels, i.e, PMD is not folded.
         */
-       if ((vma_pagesize == PMD_SIZE ||
-            (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) &&
-           !force_pte) {
+       if (vma_pagesize == PMD_SIZE ||
+           (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
                gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
-       }
        up_read(&current->mm->mmap_sem);
 
        /* We need minimum second+third level pages */
@@ -1760,8 +1781,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                 * Only PMD_SIZE transparent hugepages(THP) are
                 * currently supported. This code will need to be
                 * updated to support other THP sizes.
+                *
+                * Make sure the host VA and the guest IPA are sufficiently
+                * aligned and that the block is contained within the memslot.
                 */
-               if (transparent_hugepage_adjust(&pfn, &fault_ipa))
+               if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
+                   transparent_hugepage_adjust(&pfn, &fault_ipa))
                        vma_pagesize = PMD_SIZE;
        }
 
index ab3f47745d9caaedf263577c4a7263d6386f70b5..44ceaccb18cff19655c2668fcc2f5a2f482af34a 100644 (file)
@@ -754,8 +754,9 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
        u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
        phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
        int esz = GITS_BASER_ENTRY_SIZE(baser);
-       int index;
+       int index, idx;
        gfn_t gfn;
+       bool ret;
 
        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
@@ -782,7 +783,8 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
 
                if (eaddr)
                        *eaddr = addr;
-               return kvm_is_visible_gfn(its->dev->kvm, gfn);
+
+               goto out;
        }
 
        /* calculate and check the index into the 1st level */
@@ -812,7 +814,12 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
 
        if (eaddr)
                *eaddr = indirect_ptr;
-       return kvm_is_visible_gfn(its->dev->kvm, gfn);
+
+out:
+       idx = srcu_read_lock(&its->dev->kvm->srcu);
+       ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
+       srcu_read_unlock(&its->dev->kvm->srcu, idx);
+       return ret;
 }
 
 static int vgic_its_alloc_collection(struct vgic_its *its,
@@ -1729,8 +1736,8 @@ static void vgic_its_destroy(struct kvm_device *kvm_dev)
        kfree(its);
 }
 
-int vgic_its_has_attr_regs(struct kvm_device *dev,
-                          struct kvm_device_attr *attr)
+static int vgic_its_has_attr_regs(struct kvm_device *dev,
+                                 struct kvm_device_attr *attr)
 {
        const struct vgic_register_region *region;
        gpa_t offset = attr->attr;
@@ -1750,9 +1757,9 @@ int vgic_its_has_attr_regs(struct kvm_device *dev,
        return 0;
 }
 
-int vgic_its_attr_regs_access(struct kvm_device *dev,
-                             struct kvm_device_attr *attr,
-                             u64 *reg, bool is_write)
+static int vgic_its_attr_regs_access(struct kvm_device *dev,
+                                    struct kvm_device_attr *attr,
+                                    u64 *reg, bool is_write)
 {
        const struct vgic_register_region *region;
        struct vgic_its *its;
@@ -1919,7 +1926,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
               ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
                ite->collection->collection_id;
        val = cpu_to_le64(val);
-       return kvm_write_guest(kvm, gpa, &val, ite_esz);
+       return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
 }
 
 /**
@@ -2066,7 +2073,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
               (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
                (dev->num_eventid_bits - 1));
        val = cpu_to_le64(val);
-       return kvm_write_guest(kvm, ptr, &val, dte_esz);
+       return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
 }
 
 /**
@@ -2246,7 +2253,7 @@ static int vgic_its_save_cte(struct vgic_its *its,
               ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
               collection->collection_id);
        val = cpu_to_le64(val);
-       return kvm_write_guest(its->dev->kvm, gpa, &val, esz);
+       return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
 }
 
 static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
@@ -2317,7 +2324,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
         */
        val = 0;
        BUG_ON(cte_esz > sizeof(val));
-       ret = kvm_write_guest(its->dev->kvm, gpa, &val, cte_esz);
+       ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
        return ret;
 }
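The kvm_write_guest() -> kvm_write_guest_lock() conversions in this file (and the two in the pending-table code below) exist because kvm_write_guest() walks the memslots, which is only legal inside an srcu read-side critical section. The wrapper is presumably the same pattern the vgic_its_check_id() hunk above open-codes, roughly (a sketch of the assumed helper, not a quote of it):

    static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
                                           const void *data, unsigned long len)
    {
            int srcu_idx = srcu_read_lock(&kvm->srcu);
            int ret = kvm_write_guest(kvm, gpa, data, len);

            srcu_read_unlock(&kvm->srcu, srcu_idx);
            return ret;
    }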
 
index 4a12322bf7df81215d705eb3f7b5ab825d625fdc..9f4843fe9cda64e6f527f1c3f5518b86d8b2d1d9 100644 (file)
@@ -200,6 +200,9 @@ static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
 
        vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;
 
+       if (was_enabled && !vgic_cpu->lpis_enabled)
+               vgic_flush_pending_lpis(vcpu);
+
        if (!was_enabled && vgic_cpu->lpis_enabled)
                vgic_enable_lpis(vcpu);
 }
index 408a78eb6a97b13d48ff576d81a113e0c3da49b9..9f87e58dbd4aebaae1c7688fc2251dec21bb191a 100644 (file)
@@ -358,7 +358,7 @@ retry:
        if (status) {
                /* clear consumed data */
                val &= ~(1 << bit_nr);
-               ret = kvm_write_guest(kvm, ptr, &val, 1);
+               ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
                if (ret)
                        return ret;
        }
@@ -409,7 +409,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
                else
                        val &= ~(1 << bit_nr);
 
-               ret = kvm_write_guest(kvm, ptr, &val, 1);
+               ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
                if (ret)
                        return ret;
        }
index abd9c735267784a3085b797d33133a579dc629c6..191deccf60bf9c6dadc3afd3dab4f330e765262b 100644 (file)
@@ -151,6 +151,27 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
        kfree(irq);
 }
 
+void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
+{
+       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       struct vgic_irq *irq, *tmp;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
+
+       list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
+               if (irq->intid >= VGIC_MIN_LPI) {
+                       raw_spin_lock(&irq->irq_lock);
+                       list_del(&irq->ap_list);
+                       irq->vcpu = NULL;
+                       raw_spin_unlock(&irq->irq_lock);
+                       vgic_put_irq(vcpu->kvm, irq);
+               }
+       }
+
+       raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
+}
+
 void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
 {
        WARN_ON(irq_set_irqchip_state(irq->host_irq,
@@ -867,15 +888,21 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
         * either observe the new interrupt before or after doing this check,
         * and introducing additional synchronization mechanism doesn't change
         * this.
+        *
+        * Note that we still need to go through the whole thing if anything
+        * can be directly injected (GICv4).
         */
-       if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
+       if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
+           !vgic_supports_direct_msis(vcpu->kvm))
                return;
 
        DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
-       raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
-       vgic_flush_lr_state(vcpu);
-       raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+       if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
+               raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+               vgic_flush_lr_state(vcpu);
+               raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+       }
 
        if (can_access_vgic_from_kernel())
                vgic_restore_state(vcpu);
index a90024718ca44b941e8b4d7aa0f6bf20233c4596..abeeffabc456cb284ff2397769b6d55986f3931f 100644 (file)
@@ -238,6 +238,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu);
 bool vgic_has_its(struct kvm *kvm);
 int kvm_vgic_register_its_device(void);
 void vgic_enable_lpis(struct kvm_vcpu *vcpu);
+void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu);
 int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
 int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
 int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
index 4325250afd728447630a2decb1333965d5fd2cf8..001aeda4c154d4a9ff5b2e9f73cebdc743c2c29b 100644 (file)
@@ -214,9 +214,9 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 
        if (flags & EPOLLHUP) {
                /* The eventfd is closing, detach from KVM */
-               unsigned long flags;
+               unsigned long iflags;
 
-               spin_lock_irqsave(&kvm->irqfds.lock, flags);
+               spin_lock_irqsave(&kvm->irqfds.lock, iflags);
 
                /*
                 * We must check if someone deactivated the irqfd before
@@ -230,7 +230,7 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
                if (irqfd_is_active(irqfd))
                        irqfd_deactivate(irqfd);
 
-               spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
+               spin_unlock_irqrestore(&kvm->irqfds.lock, iflags);
        }
 
        return 0;
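The flags -> iflags rename is purely a shadowing fix: the inner unsigned long used for spin_lock_irqsave() hid the poll-event flags computed at the top of irqfd_wakeup(), which is the pattern -Wshadow reports. A minimal reproduction of the old shape (illustrative, trimmed from context):

    __poll_t flags = key_to_poll(key);

    if (flags & EPOLLHUP) {
            unsigned long flags;    /* -Wshadow: shadows the __poll_t 'flags' */

            spin_lock_irqsave(&kvm->irqfds.lock, flags);
            /* ... detach the irqfd ... */
            spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
    }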
index 3547b0d8c91ea2c84e0869b769e9947829fe4286..79e59e4fa3dc6be751079e669e214b7fc614e07f 100644 (file)
@@ -144,18 +144,19 @@ static int setup_routing_entry(struct kvm *kvm,
 {
        struct kvm_kernel_irq_routing_entry *ei;
        int r;
+       u32 gsi = array_index_nospec(ue->gsi, KVM_MAX_IRQ_ROUTES);
 
        /*
         * Do not allow GSI to be mapped to the same irqchip more than once.
         * Allow only one to one mapping between GSI and non-irqchip routing.
         */
-       hlist_for_each_entry(ei, &rt->map[ue->gsi], link)
+       hlist_for_each_entry(ei, &rt->map[gsi], link)
                if (ei->type != KVM_IRQ_ROUTING_IRQCHIP ||
                    ue->type != KVM_IRQ_ROUTING_IRQCHIP ||
                    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
                        return -EINVAL;
 
-       e->gsi = ue->gsi;
+       e->gsi = gsi;
        e->type = ue->type;
        r = kvm_set_routing_entry(kvm, e, ue);
        if (r)
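array_index_nospec() is the stock Spectre-v1 defense: after the architectural bounds check, it clamps the index to zero on the speculative path where the check would have failed, so a mis-speculated table lookup cannot be turned into a memory-disclosure gadget. The generic shape, matching both this hunk and the kvm_device_ops_table change further down (sketch only):

    #include <linux/nospec.h>

    if (idx >= size)
            return -EINVAL;
    idx = array_index_nospec(idx, size);    /* bounded even under speculation */
    entry = table[idx];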
index f25aa98a94df430b6064c31e89ff1d614d8846b8..a704d1f9bd962e99d3b6fb927cbf0d3574dc1f41 100644 (file)
@@ -1240,7 +1240,7 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
        if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
                return -EINVAL;
 
-       if ((log->first_page & 63) || (log->num_pages & 63))
+       if (log->first_page & 63)
                return -EINVAL;
 
        slots = __kvm_memslots(kvm, as_id);
@@ -1253,8 +1253,9 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
        n = kvm_dirty_bitmap_bytes(memslot);
 
        if (log->first_page > memslot->npages ||
-           log->num_pages > memslot->npages - log->first_page)
-                       return -EINVAL;
+           log->num_pages > memslot->npages - log->first_page ||
+           (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
+           return -EINVAL;
 
        *flush = false;
        dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
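The relaxed check lets KVM_CLEAR_DIRTY_LOG reach the tail of a memslot whose size is not a multiple of 64 pages: num_pages must still be 64-aligned unless the range runs exactly to the end of the slot, while first_page stays 64-aligned because the bitmap is processed one 64-bit word at a time. Worked example (illustrative numbers):

    /* memslot->npages = 100, log->first_page = 64                        */
    /* num_pages = 36: 64 + 36 == 100, reaches the end of the slot -> OK  */
    /* num_pages = 32: 32 & 63 != 0 and stops short of the end -> -EINVAL */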
@@ -2905,6 +2906,9 @@ static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
 {
        struct kvm_device *dev = filp->private_data;
 
+       if (dev->kvm->mm != current->mm)
+               return -EIO;
+
        switch (ioctl) {
        case KVM_SET_DEVICE_ATTR:
                return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
@@ -2974,12 +2978,14 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
        struct kvm_device_ops *ops = NULL;
        struct kvm_device *dev;
        bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
+       int type;
        int ret;
 
        if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
                return -ENODEV;
 
-       ops = kvm_device_ops_table[cd->type];
+       type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
+       ops = kvm_device_ops_table[type];
        if (ops == NULL)
                return -ENODEV;
 
@@ -2994,7 +3000,7 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
        dev->kvm = kvm;
 
        mutex_lock(&kvm->lock);
-       ret = ops->create(dev, cd->type);
+       ret = ops->create(dev, type);
        if (ret < 0) {
                mutex_unlock(&kvm->lock);
                kfree(dev);