Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
authorLinus Torvalds <torvalds@linux-foundation.org>
Tue, 23 Oct 2018 12:32:18 +0000 (13:32 +0100)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 23 Oct 2018 12:32:18 +0000 (13:32 +0100)
Pull perf updates from Ingo Molnar:
 "The main updates in this cycle were:

   - Lots of perf tooling changes too voluminous to list (big perf trace
     and perf stat improvements, lots of libtraceevent reorganization,
     etc.), so I'll list the authors and refer to the changelog for
     details:

       Benjamin Peterson, Jérémie Galarneau, Kim Phillips, Peter
       Zijlstra, Ravi Bangoria, Sangwon Hong, Sean V Kelley, Steven
       Rostedt, Thomas Gleixner, Ding Xiang, Eduardo Habkost, Thomas
       Richter, Andi Kleen, Sanskriti Sharma, Adrian Hunter, Tzvetomir
       Stoyanov, Arnaldo Carvalho de Melo, Jiri Olsa.

     ... with the bulk of the changes written by Jiri Olsa, Tzvetomir
     Stoyanov and Arnaldo Carvalho de Melo.

   - Continued intel_rdt work with a focus on playing well with perf
     events. This also imported some non-perf RDT work due to
     dependencies. (Reinette Chatre)

   - Implement counter freezing for Arch Perfmon v4 (Skylake and newer).
     This allows speeding up the PMI handler by avoiding unnecessary MSR
     writes and makes it more accurate. (Andi Kleen)

   - kprobes cleanups and simplification (Masami Hiramatsu)

   - Intel Goldmont PMU updates (Kan Liang)

   - ... plus misc other fixes and updates"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (155 commits)
  kprobes/x86: Use preempt_enable() in optimized_callback()
  x86/intel_rdt: Prevent pseudo-locking from using stale pointers
  kprobes, x86/ptrace.h: Make regs_get_kernel_stack_nth() not fault on bad stack
  perf/x86/intel: Export mem events only if there's PEBS support
  x86/cpu: Drop pointless static qualifier in punit_dev_state_show()
  x86/intel_rdt: Fix initial allocation to consider CDP
  x86/intel_rdt: CBM overlap should also check for overlap with CDP peer
  x86/intel_rdt: Introduce utility to obtain CDP peer
  tools lib traceevent, perf tools: Move struct tep_handler definition in a local header file
  tools lib traceevent: Separate out tep_strerror() for strerror_r() issues
  perf python: More portable way to make CFLAGS work with clang
  perf python: Make clang_has_option() work on Python 3
  perf tools: Free temporary 'sys' string in read_event_files()
  perf tools: Avoid double free in read_event_file()
  perf tools: Free 'printk' string in parse_ftrace_printk()
  perf tools: Cleanup trace-event-info 'tdata' leak
  perf strbuf: Match va_{add,copy} with va_end
  perf test: S390 does not support watchpoints in test 22
  perf auxtrace: Include missing asm/bitsperlong.h to get BITS_PER_LONG
  tools include: Adopt linux/bits.h
  ...

1  2 
Documentation/admin-guide/kernel-parameters.txt
arch/x86/include/asm/ptrace.h
arch/x86/kernel/tsc.c
drivers/acpi/acpi_lpss.c
drivers/cpufreq/intel_pstate.c
drivers/idle/intel_idle.c
drivers/mmc/host/sdhci-acpi.c
drivers/powercap/intel_rapl.c
tools/perf/Makefile.perf
tools/perf/builtin-report.c
tools/perf/util/evsel.c

index e9054fd1d0d09a9e34af8b5a9f31298420dbb1cc,6795dedcbd1e0513285a3ee6fbf15f53eda94ac6..4c74a2f4ddfc38c3aaf79fa5f6db10bc524dc78e
                        causing system reset or hang due to sending
                        INIT from AP to BSP.
  
+       disable_counter_freezing [HW]
+                       Disable Intel PMU counter freezing feature.
+                       The feature only exists starting from
+                       Arch Perfmon v4 (Skylake and newer).
        disable_ddw     [PPC/PSERIES]
                        Disable Dynamic DMA Window support. Use this
                        to work around buggy firmware.
  
                        In kernels built with CONFIG_RCU_NOCB_CPU=y, set
                        the specified list of CPUs to be no-callback CPUs.
 -                      Invocation of these CPUs' RCU callbacks will
 -                      be offloaded to "rcuox/N" kthreads created for
 -                      that purpose, where "x" is "b" for RCU-bh, "p"
 -                      for RCU-preempt, and "s" for RCU-sched, and "N"
 -                      is the CPU number.  This reduces OS jitter on the
 -                      offloaded CPUs, which can be useful for HPC and
 -                      real-time workloads.  It can also improve energy
 -                      efficiency for asymmetric multiprocessors.
 +                      Invocation of these CPUs' RCU callbacks will be
 +                      offloaded to "rcuox/N" kthreads created for that
 +                      purpose, where "x" is "p" for RCU-preempt, and
 +                      "s" for RCU-sched, and "N" is the CPU number.
 +                      This reduces OS jitter on the offloaded CPUs,
 +                      which can be useful for HPC and real-time
 +                      workloads.  It can also improve energy efficiency
 +                      for asymmetric multiprocessors.
  
        rcu_nocb_poll   [KNL]
                        Rather than requiring that offloaded CPUs
                        Set required age in jiffies for a
                        given grace period before RCU starts
                        soliciting quiescent-state help from
 -                      rcu_note_context_switch().
 +                      rcu_note_context_switch().  If not specified, the
 +                      kernel will calculate a value based on the most
 +                      recent settings of rcutree.jiffies_till_first_fqs
 +                      and rcutree.jiffies_till_next_fqs.
 +                      This calculated value may be viewed in
 +                      rcutree.jiffies_to_sched_qs.  Any attempt to
 +                      set rcutree.jiffies_to_sched_qs will be
 +                      cheerfully overwritten.
  
        rcutree.jiffies_till_first_fqs= [KNL]
                        Set delay from grace-period initialization to
        rcupdate.rcu_self_test= [KNL]
                        Run the RCU early boot self tests
  
 -      rcupdate.rcu_self_test_bh= [KNL]
 -                      Run the RCU bh early boot self tests
 -
 -      rcupdate.rcu_self_test_sched= [KNL]
 -                      Run the RCU sched early boot self tests
 -
        rdinit=         [KNL]
                        Format: <full_path>
                        Run specified binary instead of /init from the ramdisk,
index 5e58a74bfd3a099dda4472031e0c3c293b2655cc,ee696efec99fd213697695c1351e656dd19dd438..25f49af1b13c16cf562b6e9e4a4f28f88f87e3ea
@@@ -37,10 -37,8 +37,10 @@@ struct pt_regs 
        unsigned short __esh;
        unsigned short fs;
        unsigned short __fsh;
 +      /* On interrupt, gs and __gsh store the vector number. */
        unsigned short gs;
        unsigned short __gsh;
 +      /* On interrupt, this is the error code. */
        unsigned long orig_ax;
        unsigned long ip;
        unsigned short cs;
@@@ -238,24 -236,52 +238,52 @@@ static inline int regs_within_kernel_st
                (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
  }
  
+ /**
+  * regs_get_kernel_stack_nth_addr() - get the address of the Nth entry on stack
+  * @regs:     pt_regs which contains kernel stack pointer.
+  * @n:                stack entry number.
+  *
+  * regs_get_kernel_stack_nth() returns the address of the @n th entry of the
+  * kernel stack which is specified by @regs. If the @n th entry is NOT in
+  * the kernel stack, this returns NULL.
+  */
+ static inline unsigned long *regs_get_kernel_stack_nth_addr(struct pt_regs *regs, unsigned int n)
+ {
+       unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+       addr += n;
+       if (regs_within_kernel_stack(regs, (unsigned long)addr))
+               return addr;
+       else
+               return NULL;
+ }
+ /* To avoid include hell, we can't include uaccess.h */
+ extern long probe_kernel_read(void *dst, const void *src, size_t size);
  /**
   * regs_get_kernel_stack_nth() - get Nth entry of the stack
   * @regs:     pt_regs which contains kernel stack pointer.
   * @n:                stack entry number.
   *
   * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
-  * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+  * is specified by @regs. If the @n th entry is NOT in the kernel stack
   * this returns 0.
   */
  static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
                                                      unsigned int n)
  {
-       unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
-       addr += n;
-       if (regs_within_kernel_stack(regs, (unsigned long)addr))
-               return *addr;
-       else
-               return 0;
+       unsigned long *addr;
+       unsigned long val;
+       long ret;
+       addr = regs_get_kernel_stack_nth_addr(regs, n);
+       if (addr) {
+               ret = probe_kernel_read(&val, addr, sizeof(val));
+               if (!ret)
+                       return val;
+       }
+       return 0;
  }
  
  #define arch_has_single_step()        (1)
diff --combined arch/x86/kernel/tsc.c
index 6d5dc5dabfd7ec71532c9a48e00c6a70da950a35,19c3c6bec325671e5d835c9dbefca7c9a649c52b..03b7529333a679f1d9b3ac505f04a1bf82de4caf
@@@ -58,7 -58,7 +58,7 @@@ struct cyc2ns 
  
  static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
  
 -void cyc2ns_read_begin(struct cyc2ns_data *data)
 +void __always_inline cyc2ns_read_begin(struct cyc2ns_data *data)
  {
        int seq, idx;
  
@@@ -75,7 -75,7 +75,7 @@@
        } while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
  }
  
 -void cyc2ns_read_end(void)
 +void __always_inline cyc2ns_read_end(void)
  {
        preempt_enable_notrace();
  }
   *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
   */
  
 -static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 +static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
  {
        struct cyc2ns_data data;
        unsigned long long ns;
@@@ -636,7 -636,7 +636,7 @@@ unsigned long native_calibrate_tsc(void
                case INTEL_FAM6_KABYLAKE_DESKTOP:
                        crystal_khz = 24000;    /* 24.0 MHz */
                        break;
-               case INTEL_FAM6_ATOM_DENVERTON:
+               case INTEL_FAM6_ATOM_GOLDMONT_X:
                        crystal_khz = 25000;    /* 25.0 MHz */
                        break;
                case INTEL_FAM6_ATOM_GOLDMONT:
diff --combined drivers/acpi/acpi_lpss.c
index 10adb8cb3a3f27c4c563983a827457554686f7d5,9efa3a588620638c340fb4ea3ebc4253c718a380..b9bda06d344d7eea58fddc3a3a8328868f1c662c
@@@ -16,7 -16,6 +16,7 @@@
  #include <linux/err.h>
  #include <linux/io.h>
  #include <linux/mutex.h>
 +#include <linux/pci.h>
  #include <linux/platform_device.h>
  #include <linux/platform_data/clk-lpss.h>
  #include <linux/platform_data/x86/pmc_atom.h>
@@@ -84,7 -83,6 +84,7 @@@ struct lpss_device_desc 
        size_t prv_size_override;
        struct property_entry *properties;
        void (*setup)(struct lpss_private_data *pdata);
 +      bool resume_from_noirq;
  };
  
  static const struct lpss_device_desc lpss_dma_desc = {
@@@ -101,9 -99,6 +101,9 @@@ struct lpss_private_data 
        u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
  };
  
 +/* Devices which need to be in D3 before lpss_iosf_enter_d3_state() proceeds */
 +static u32 pmc_atom_d3_mask = 0xfe000ffe;
 +
  /* LPSS run time quirks */
  static unsigned int lpss_quirks;
  
@@@ -180,21 -175,6 +180,21 @@@ static void byt_pwm_setup(struct lpss_p
  
  static void byt_i2c_setup(struct lpss_private_data *pdata)
  {
 +      const char *uid_str = acpi_device_uid(pdata->adev);
 +      acpi_handle handle = pdata->adev->handle;
 +      unsigned long long shared_host = 0;
 +      acpi_status status;
 +      long uid = 0;
 +
 +      /* Expected to always be true, but better safe than sorry */
 +      if (uid_str)
 +              uid = simple_strtol(uid_str, NULL, 10);
 +
 +      /* Detect I2C bus shared with PUNIT and ignore its d3 status */
 +      status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
 +      if (ACPI_SUCCESS(status) && shared_host && uid)
 +              pmc_atom_d3_mask &= ~(BIT_LPSS2_F1_I2C1 << (uid - 1));
 +
        lpss_deassert_reset(pdata);
  
        if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
@@@ -294,14 -274,12 +294,14 @@@ static const struct lpss_device_desc by
        .flags = LPSS_CLK | LPSS_SAVE_CTX,
        .prv_offset = 0x800,
        .setup = byt_i2c_setup,
 +      .resume_from_noirq = true,
  };
  
  static const struct lpss_device_desc bsw_i2c_dev_desc = {
        .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
        .prv_offset = 0x800,
        .setup = byt_i2c_setup,
 +      .resume_from_noirq = true,
  };
  
  static const struct lpss_device_desc bsw_spi_dev_desc = {
  #define ICPU(model)   { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
  
  static const struct x86_cpu_id lpss_cpu_ids[] = {
-       ICPU(INTEL_FAM6_ATOM_SILVERMONT1),      /* Valleyview, Bay Trail */
+       ICPU(INTEL_FAM6_ATOM_SILVERMONT),       /* Valleyview, Bay Trail */
        ICPU(INTEL_FAM6_ATOM_AIRMONT),  /* Braswell, Cherry Trail */
        {}
  };
@@@ -349,11 -327,9 +349,11 @@@ static const struct acpi_device_id acpi
        { "INT33FC", },
  
        /* Braswell LPSS devices */
 +      { "80862286", LPSS_ADDR(lpss_dma_desc) },
        { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
        { "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
        { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
 +      { "808622C0", LPSS_ADDR(lpss_dma_desc) },
        { "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },
  
        /* Broadwell LPSS devices */
@@@ -475,35 -451,26 +475,35 @@@ struct lpss_device_links 
   */
  static const struct lpss_device_links lpss_device_links[] = {
        {"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
 +      {"808622C1", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
 +      {"80860F41", "5", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
  };
  
 -static bool hid_uid_match(const char *hid1, const char *uid1,
 +static bool hid_uid_match(struct acpi_device *adev,
                          const char *hid2, const char *uid2)
  {
 -      return !strcmp(hid1, hid2) && uid1 && uid2 && !strcmp(uid1, uid2);
 +      const char *hid1 = acpi_device_hid(adev);
 +      const char *uid1 = acpi_device_uid(adev);
 +
 +      if (strcmp(hid1, hid2))
 +              return false;
 +
 +      if (!uid2)
 +              return true;
 +
 +      return uid1 && !strcmp(uid1, uid2);
  }
  
  static bool acpi_lpss_is_supplier(struct acpi_device *adev,
                                  const struct lpss_device_links *link)
  {
 -      return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
 -                           link->supplier_hid, link->supplier_uid);
 +      return hid_uid_match(adev, link->supplier_hid, link->supplier_uid);
  }
  
  static bool acpi_lpss_is_consumer(struct acpi_device *adev,
                                  const struct lpss_device_links *link)
  {
 -      return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
 -                           link->consumer_hid, link->consumer_uid);
 +      return hid_uid_match(adev, link->consumer_hid, link->consumer_uid);
  }
  
  struct hid_uid {
@@@ -519,23 -486,18 +519,23 @@@ static int match_hid_uid(struct device 
        if (!adev)
                return 0;
  
 -      return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
 -                           id->hid, id->uid);
 +      return hid_uid_match(adev, id->hid, id->uid);
  }
  
  static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
  {
 +      struct device *dev;
 +
        struct hid_uid data = {
                .hid = hid,
                .uid = uid,
        };
  
 -      return bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
 +      dev = bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
 +      if (dev)
 +              return dev;
 +
 +      return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid);
  }
  
  static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
@@@ -930,7 -892,7 +930,7 @@@ static void lpss_iosf_enter_d3_state(vo
         * Here we read the values related to LPSS power island, i.e. LPSS
         * devices, excluding both LPSS DMA controllers, along with SCC domain.
         */
 -      u32 func_dis, d3_sts_0, pmc_status, pmc_mask = 0xfe000ffe;
 +      u32 func_dis, d3_sts_0, pmc_status;
        int ret;
  
        ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);
         * Shutdown both LPSS DMA controllers if and only if all other devices
         * are already in D3hot.
         */
 -      pmc_status = (~(d3_sts_0 | func_dis)) & pmc_mask;
 +      pmc_status = (~(d3_sts_0 | func_dis)) & pmc_atom_d3_mask;
        if (pmc_status)
                goto exit;
  
@@@ -1042,7 -1004,7 +1042,7 @@@ static int acpi_lpss_resume(struct devi
  }
  
  #ifdef CONFIG_PM_SLEEP
 -static int acpi_lpss_suspend_late(struct device *dev)
 +static int acpi_lpss_do_suspend_late(struct device *dev)
  {
        int ret;
  
        return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
  }
  
 -static int acpi_lpss_resume_early(struct device *dev)
 +static int acpi_lpss_suspend_late(struct device *dev)
 +{
 +      struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
 +
 +      if (pdata->dev_desc->resume_from_noirq)
 +              return 0;
 +
 +      return acpi_lpss_do_suspend_late(dev);
 +}
 +
 +static int acpi_lpss_suspend_noirq(struct device *dev)
 +{
 +      struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
 +      int ret;
 +
 +      if (pdata->dev_desc->resume_from_noirq) {
 +              ret = acpi_lpss_do_suspend_late(dev);
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      return acpi_subsys_suspend_noirq(dev);
 +}
 +
 +static int acpi_lpss_do_resume_early(struct device *dev)
  {
        int ret = acpi_lpss_resume(dev);
  
        return ret ? ret : pm_generic_resume_early(dev);
  }
 +
 +static int acpi_lpss_resume_early(struct device *dev)
 +{
 +      struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
 +
 +      if (pdata->dev_desc->resume_from_noirq)
 +              return 0;
 +
 +      return acpi_lpss_do_resume_early(dev);
 +}
 +
 +static int acpi_lpss_resume_noirq(struct device *dev)
 +{
 +      struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
 +      int ret;
 +
 +      ret = acpi_subsys_resume_noirq(dev);
 +      if (ret)
 +              return ret;
 +
 +      if (!dev_pm_may_skip_resume(dev) && pdata->dev_desc->resume_from_noirq)
 +              ret = acpi_lpss_do_resume_early(dev);
 +
 +      return ret;
 +}
 +
  #endif /* CONFIG_PM_SLEEP */
  
  static int acpi_lpss_runtime_suspend(struct device *dev)
@@@ -1138,8 -1050,8 +1138,8 @@@ static struct dev_pm_domain acpi_lpss_p
                .complete = acpi_subsys_complete,
                .suspend = acpi_subsys_suspend,
                .suspend_late = acpi_lpss_suspend_late,
 -              .suspend_noirq = acpi_subsys_suspend_noirq,
 -              .resume_noirq = acpi_subsys_resume_noirq,
 +              .suspend_noirq = acpi_lpss_suspend_noirq,
 +              .resume_noirq = acpi_lpss_resume_noirq,
                .resume_early = acpi_lpss_resume_early,
                .freeze = acpi_subsys_freeze,
                .freeze_late = acpi_subsys_freeze_late,
index 2a99e2fd941246905fff42933a72a98b8576ce77,75140dd07037c72694b7d93b7eadb592e6afe28d..49c0abf2d48f0f3189a7d57585710c2b7514da75
@@@ -373,28 -373,10 +373,28 @@@ static void intel_pstate_set_itmt_prio(
                }
        }
  }
 +
 +static int intel_pstate_get_cppc_guranteed(int cpu)
 +{
 +      struct cppc_perf_caps cppc_perf;
 +      int ret;
 +
 +      ret = cppc_get_perf_caps(cpu, &cppc_perf);
 +      if (ret)
 +              return ret;
 +
 +      return cppc_perf.guaranteed_perf;
 +}
 +
  #else
  static void intel_pstate_set_itmt_prio(int cpu)
  {
  }
 +
 +static int intel_pstate_get_cppc_guranteed(int cpu)
 +{
 +      return -ENOTSUPP;
 +}
  #endif
  
  static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
@@@ -717,29 -699,9 +717,29 @@@ static ssize_t show_energy_performance_
  
  cpufreq_freq_attr_rw(energy_performance_preference);
  
 +static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
 +{
 +      struct cpudata *cpu;
 +      u64 cap;
 +      int ratio;
 +
 +      ratio = intel_pstate_get_cppc_guranteed(policy->cpu);
 +      if (ratio <= 0) {
 +              rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
 +              ratio = HWP_GUARANTEED_PERF(cap);
 +      }
 +
 +      cpu = all_cpu_data[policy->cpu];
 +
 +      return sprintf(buf, "%d\n", ratio * cpu->pstate.scaling);
 +}
 +
 +cpufreq_freq_attr_ro(base_frequency);
 +
  static struct freq_attr *hwp_cpufreq_attrs[] = {
        &energy_performance_preference,
        &energy_performance_available_preferences,
 +      &base_frequency,
        NULL,
  };
  
@@@ -1816,7 -1778,7 +1816,7 @@@ static const struct pstate_funcs knl_fu
  static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
        ICPU(INTEL_FAM6_SANDYBRIDGE,            core_funcs),
        ICPU(INTEL_FAM6_SANDYBRIDGE_X,          core_funcs),
-       ICPU(INTEL_FAM6_ATOM_SILVERMONT1,       silvermont_funcs),
+       ICPU(INTEL_FAM6_ATOM_SILVERMONT,        silvermont_funcs),
        ICPU(INTEL_FAM6_IVYBRIDGE,              core_funcs),
        ICPU(INTEL_FAM6_HASWELL_CORE,           core_funcs),
        ICPU(INTEL_FAM6_BROADWELL_CORE,         core_funcs),
        ICPU(INTEL_FAM6_XEON_PHI_KNL,           knl_funcs),
        ICPU(INTEL_FAM6_XEON_PHI_KNM,           knl_funcs),
        ICPU(INTEL_FAM6_ATOM_GOLDMONT,          core_funcs),
-       ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE,       core_funcs),
+       ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS,     core_funcs),
        ICPU(INTEL_FAM6_SKYLAKE_X,              core_funcs),
        {}
  };
index 791b8a366e6edaf533b0b66af97ced3250689a54,c4bb67ed8da35c947a7d17daf92c2a52412e1f7b..8b5d85c91e9d4b2ed10d07bf98237c9f6b7eff14
@@@ -1066,43 -1066,46 +1066,43 @@@ static const struct idle_cpu idle_cpu_d
        .disable_promotion_to_c1e = true,
  };
  
 -#define ICPU(model, cpu) \
 -      { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&cpu }
 -
  static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 -      ICPU(INTEL_FAM6_NEHALEM_EP,             idle_cpu_nehalem),
 -      ICPU(INTEL_FAM6_NEHALEM,                idle_cpu_nehalem),
 -      ICPU(INTEL_FAM6_NEHALEM_G,              idle_cpu_nehalem),
 -      ICPU(INTEL_FAM6_WESTMERE,               idle_cpu_nehalem),
 -      ICPU(INTEL_FAM6_WESTMERE_EP,            idle_cpu_nehalem),
 -      ICPU(INTEL_FAM6_NEHALEM_EX,             idle_cpu_nehalem),
 -      ICPU(INTEL_FAM6_ATOM_BONNELL,           idle_cpu_atom),
 -      ICPU(INTEL_FAM6_ATOM_BONNELL_MID,               idle_cpu_lincroft),
 -      ICPU(INTEL_FAM6_WESTMERE_EX,            idle_cpu_nehalem),
 -      ICPU(INTEL_FAM6_SANDYBRIDGE,            idle_cpu_snb),
 -      ICPU(INTEL_FAM6_SANDYBRIDGE_X,          idle_cpu_snb),
 -      ICPU(INTEL_FAM6_ATOM_SALTWELL,          idle_cpu_atom),
 -      ICPU(INTEL_FAM6_ATOM_SILVERMONT,        idle_cpu_byt),
 -      ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID,    idle_cpu_tangier),
 -      ICPU(INTEL_FAM6_ATOM_AIRMONT,           idle_cpu_cht),
 -      ICPU(INTEL_FAM6_IVYBRIDGE,              idle_cpu_ivb),
 -      ICPU(INTEL_FAM6_IVYBRIDGE_X,            idle_cpu_ivt),
 -      ICPU(INTEL_FAM6_HASWELL_CORE,           idle_cpu_hsw),
 -      ICPU(INTEL_FAM6_HASWELL_X,              idle_cpu_hsw),
 -      ICPU(INTEL_FAM6_HASWELL_ULT,            idle_cpu_hsw),
 -      ICPU(INTEL_FAM6_HASWELL_GT3E,           idle_cpu_hsw),
 -      ICPU(INTEL_FAM6_ATOM_SILVERMONT_X,      idle_cpu_avn),
 -      ICPU(INTEL_FAM6_BROADWELL_CORE,         idle_cpu_bdw),
 -      ICPU(INTEL_FAM6_BROADWELL_GT3E,         idle_cpu_bdw),
 -      ICPU(INTEL_FAM6_BROADWELL_X,            idle_cpu_bdw),
 -      ICPU(INTEL_FAM6_BROADWELL_XEON_D,       idle_cpu_bdw),
 -      ICPU(INTEL_FAM6_SKYLAKE_MOBILE,         idle_cpu_skl),
 -      ICPU(INTEL_FAM6_SKYLAKE_DESKTOP,        idle_cpu_skl),
 -      ICPU(INTEL_FAM6_KABYLAKE_MOBILE,        idle_cpu_skl),
 -      ICPU(INTEL_FAM6_KABYLAKE_DESKTOP,       idle_cpu_skl),
 -      ICPU(INTEL_FAM6_SKYLAKE_X,              idle_cpu_skx),
 -      ICPU(INTEL_FAM6_XEON_PHI_KNL,           idle_cpu_knl),
 -      ICPU(INTEL_FAM6_XEON_PHI_KNM,           idle_cpu_knl),
 -      ICPU(INTEL_FAM6_ATOM_GOLDMONT,          idle_cpu_bxt),
 -      ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS,     idle_cpu_bxt),
 -      ICPU(INTEL_FAM6_ATOM_GOLDMONT_X,        idle_cpu_dnv),
 +      INTEL_CPU_FAM6(NEHALEM_EP,              idle_cpu_nehalem),
 +      INTEL_CPU_FAM6(NEHALEM,                 idle_cpu_nehalem),
 +      INTEL_CPU_FAM6(NEHALEM_G,               idle_cpu_nehalem),
 +      INTEL_CPU_FAM6(WESTMERE,                idle_cpu_nehalem),
 +      INTEL_CPU_FAM6(WESTMERE_EP,             idle_cpu_nehalem),
 +      INTEL_CPU_FAM6(NEHALEM_EX,              idle_cpu_nehalem),
-       INTEL_CPU_FAM6(ATOM_PINEVIEW,           idle_cpu_atom),
-       INTEL_CPU_FAM6(ATOM_LINCROFT,           idle_cpu_lincroft),
++      INTEL_CPU_FAM6(ATOM_BONNELL,            idle_cpu_atom),
++      INTEL_CPU_FAM6(ATOM_BONNELL_MID,        idle_cpu_lincroft),
 +      INTEL_CPU_FAM6(WESTMERE_EX,             idle_cpu_nehalem),
 +      INTEL_CPU_FAM6(SANDYBRIDGE,             idle_cpu_snb),
 +      INTEL_CPU_FAM6(SANDYBRIDGE_X,           idle_cpu_snb),
-       INTEL_CPU_FAM6(ATOM_CEDARVIEW,          idle_cpu_atom),
-       INTEL_CPU_FAM6(ATOM_SILVERMONT1,        idle_cpu_byt),
-       INTEL_CPU_FAM6(ATOM_MERRIFIELD,         idle_cpu_tangier),
++      INTEL_CPU_FAM6(ATOM_SALTWELL,           idle_cpu_atom),
++      INTEL_CPU_FAM6(ATOM_SILVERMONT,         idle_cpu_byt),
++      INTEL_CPU_FAM6(ATOM_SILVERMONT_MID,     idle_cpu_tangier),
 +      INTEL_CPU_FAM6(ATOM_AIRMONT,            idle_cpu_cht),
 +      INTEL_CPU_FAM6(IVYBRIDGE,               idle_cpu_ivb),
 +      INTEL_CPU_FAM6(IVYBRIDGE_X,             idle_cpu_ivt),
 +      INTEL_CPU_FAM6(HASWELL_CORE,            idle_cpu_hsw),
 +      INTEL_CPU_FAM6(HASWELL_X,               idle_cpu_hsw),
 +      INTEL_CPU_FAM6(HASWELL_ULT,             idle_cpu_hsw),
 +      INTEL_CPU_FAM6(HASWELL_GT3E,            idle_cpu_hsw),
-       INTEL_CPU_FAM6(ATOM_SILVERMONT2,        idle_cpu_avn),
++      INTEL_CPU_FAM6(ATOM_SILVERMONT_X,       idle_cpu_avn),
 +      INTEL_CPU_FAM6(BROADWELL_CORE,          idle_cpu_bdw),
 +      INTEL_CPU_FAM6(BROADWELL_GT3E,          idle_cpu_bdw),
 +      INTEL_CPU_FAM6(BROADWELL_X,             idle_cpu_bdw),
 +      INTEL_CPU_FAM6(BROADWELL_XEON_D,        idle_cpu_bdw),
 +      INTEL_CPU_FAM6(SKYLAKE_MOBILE,          idle_cpu_skl),
 +      INTEL_CPU_FAM6(SKYLAKE_DESKTOP,         idle_cpu_skl),
 +      INTEL_CPU_FAM6(KABYLAKE_MOBILE,         idle_cpu_skl),
 +      INTEL_CPU_FAM6(KABYLAKE_DESKTOP,        idle_cpu_skl),
 +      INTEL_CPU_FAM6(SKYLAKE_X,               idle_cpu_skx),
 +      INTEL_CPU_FAM6(XEON_PHI_KNL,            idle_cpu_knl),
 +      INTEL_CPU_FAM6(XEON_PHI_KNM,            idle_cpu_knl),
 +      INTEL_CPU_FAM6(ATOM_GOLDMONT,           idle_cpu_bxt),
-       INTEL_CPU_FAM6(ATOM_GEMINI_LAKE,        idle_cpu_bxt),
-       INTEL_CPU_FAM6(ATOM_DENVERTON,          idle_cpu_dnv),
++      INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS,      idle_cpu_bxt),
++      INTEL_CPU_FAM6(ATOM_GOLDMONT_X,         idle_cpu_dnv),
        {}
  };
  
@@@ -1319,7 -1322,7 +1319,7 @@@ static void intel_idle_state_table_upda
                ivt_idle_state_table_update();
                break;
        case INTEL_FAM6_ATOM_GOLDMONT:
-       case INTEL_FAM6_ATOM_GEMINI_LAKE:
+       case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
                bxt_idle_state_table_update();
                break;
        case INTEL_FAM6_SKYLAKE_DESKTOP:
index 82c9b9326e9e5635cd570de9c7706c74ac9fffda,cca6cde1b7e84b68840ffb259664ec3243d6fd7c..057e24f4a620f692add8f632995872f5bcb88500
@@@ -76,7 -76,6 +76,7 @@@ struct sdhci_acpi_slot 
        size_t          priv_size;
        int (*probe_slot)(struct platform_device *, const char *, const char *);
        int (*remove_slot)(struct platform_device *);
 +      int (*free_slot)(struct platform_device *pdev);
        int (*setup_host)(struct platform_device *pdev);
  };
  
@@@ -247,7 -246,7 +247,7 @@@ static const struct sdhci_acpi_chip sdh
  static bool sdhci_acpi_byt(void)
  {
        static const struct x86_cpu_id byt[] = {
-               { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
+               { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
                {}
        };
  
@@@ -471,70 -470,10 +471,70 @@@ static const struct sdhci_acpi_slot sdh
        .priv_size      = sizeof(struct intel_host),
  };
  
 +#define VENDOR_SPECIFIC_PWRCTL_CLEAR_REG      0x1a8
 +#define VENDOR_SPECIFIC_PWRCTL_CTL_REG                0x1ac
 +static irqreturn_t sdhci_acpi_qcom_handler(int irq, void *ptr)
 +{
 +      struct sdhci_host *host = ptr;
 +
 +      sdhci_writel(host, 0x3, VENDOR_SPECIFIC_PWRCTL_CLEAR_REG);
 +      sdhci_writel(host, 0x1, VENDOR_SPECIFIC_PWRCTL_CTL_REG);
 +
 +      return IRQ_HANDLED;
 +}
 +
 +static int qcom_probe_slot(struct platform_device *pdev, const char *hid,
 +                         const char *uid)
 +{
 +      struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
 +      struct sdhci_host *host = c->host;
 +      int *irq = sdhci_acpi_priv(c);
 +
 +      *irq = -EINVAL;
 +
 +      if (strcmp(hid, "QCOM8051"))
 +              return 0;
 +
 +      *irq = platform_get_irq(pdev, 1);
 +      if (*irq < 0)
 +              return 0;
 +
 +      return request_threaded_irq(*irq, NULL, sdhci_acpi_qcom_handler,
 +                                  IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
 +                                  "sdhci_qcom", host);
 +}
 +
 +static int qcom_free_slot(struct platform_device *pdev)
 +{
 +      struct device *dev = &pdev->dev;
 +      struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
 +      struct sdhci_host *host = c->host;
 +      struct acpi_device *adev;
 +      int *irq = sdhci_acpi_priv(c);
 +      const char *hid;
 +
 +      adev = ACPI_COMPANION(dev);
 +      if (!adev)
 +              return -ENODEV;
 +
 +      hid = acpi_device_hid(adev);
 +      if (strcmp(hid, "QCOM8051"))
 +              return 0;
 +
 +      if (*irq < 0)
 +              return 0;
 +
 +      free_irq(*irq, host);
 +      return 0;
 +}
 +
  static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd_3v = {
        .quirks  = SDHCI_QUIRK_BROKEN_CARD_DETECTION,
        .quirks2 = SDHCI_QUIRK2_NO_1_8_V,
        .caps    = MMC_CAP_NONREMOVABLE,
 +      .priv_size      = sizeof(int),
 +      .probe_slot     = qcom_probe_slot,
 +      .free_slot      = qcom_free_slot,
  };
  
  static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = {
@@@ -817,9 -756,6 +817,9 @@@ static int sdhci_acpi_probe(struct plat
  err_cleanup:
        sdhci_cleanup_host(c->host);
  err_free:
 +      if (c->slot && c->slot->free_slot)
 +              c->slot->free_slot(pdev);
 +
        sdhci_free_host(c->host);
        return err;
  }
@@@ -841,10 -777,6 +841,10 @@@ static int sdhci_acpi_remove(struct pla
  
        dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0);
        sdhci_remove_host(c->host, dead);
 +
 +      if (c->slot && c->slot->free_slot)
 +              c->slot->free_slot(pdev);
 +
        sdhci_free_host(c->host);
  
        return 0;
index bb92874b1175ecc5f129c56c19a911f6d206ef0e,8cbfcce57a0680d078f692a3db5a500b9afd0917..6cdb2c14eee4c33db1ec24e3fcfebe62a03b9a5d
@@@ -1133,40 -1133,47 +1133,40 @@@ static const struct rapl_defaults rapl_
        .compute_time_window = rapl_compute_time_window_atom,
  };
  
 -#define RAPL_CPU(_model, _ops) {                      \
 -              .vendor = X86_VENDOR_INTEL,             \
 -              .family = 6,                            \
 -              .model = _model,                        \
 -              .driver_data = (kernel_ulong_t)&_ops,   \
 -              }
 -
  static const struct x86_cpu_id rapl_ids[] __initconst = {
 -      RAPL_CPU(INTEL_FAM6_SANDYBRIDGE,        rapl_defaults_core),
 -      RAPL_CPU(INTEL_FAM6_SANDYBRIDGE_X,      rapl_defaults_core),
 -
 -      RAPL_CPU(INTEL_FAM6_IVYBRIDGE,          rapl_defaults_core),
 -      RAPL_CPU(INTEL_FAM6_IVYBRIDGE_X,        rapl_defaults_core),
 -
 -      RAPL_CPU(INTEL_FAM6_HASWELL_CORE,       rapl_defaults_core),
 -      RAPL_CPU(INTEL_FAM6_HASWELL_ULT,        rapl_defaults_core),
 -      RAPL_CPU(INTEL_FAM6_HASWELL_GT3E,       rapl_defaults_core),
 -      RAPL_CPU(INTEL_FAM6_HASWELL_X,          rapl_defaults_hsw_server),
 -
 -      RAPL_CPU(INTEL_FAM6_BROADWELL_CORE,     rapl_defaults_core),
 -      RAPL_CPU(INTEL_FAM6_BROADWELL_GT3E,     rapl_defaults_core),
 -      RAPL_CPU(INTEL_FAM6_BROADWELL_XEON_D,   rapl_defaults_core),
 -      RAPL_CPU(INTEL_FAM6_BROADWELL_X,        rapl_defaults_hsw_server),
 -
 -      RAPL_CPU(INTEL_FAM6_SKYLAKE_DESKTOP,    rapl_defaults_core),
 -      RAPL_CPU(INTEL_FAM6_SKYLAKE_MOBILE,     rapl_defaults_core),
 -      RAPL_CPU(INTEL_FAM6_SKYLAKE_X,          rapl_defaults_hsw_server),
 -      RAPL_CPU(INTEL_FAM6_KABYLAKE_MOBILE,    rapl_defaults_core),
 -      RAPL_CPU(INTEL_FAM6_KABYLAKE_DESKTOP,   rapl_defaults_core),
 -      RAPL_CPU(INTEL_FAM6_CANNONLAKE_MOBILE,  rapl_defaults_core),
 -
 -      RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT,    rapl_defaults_byt),
 -      RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT,       rapl_defaults_cht),
 -      RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT_MID,        rapl_defaults_tng),
 -      RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT_MID,   rapl_defaults_ann),
 -      RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT,      rapl_defaults_core),
 -      RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, rapl_defaults_core),
 -      RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT_X,    rapl_defaults_core),
 -
 -      RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL,       rapl_defaults_hsw_server),
 -      RAPL_CPU(INTEL_FAM6_XEON_PHI_KNM,       rapl_defaults_hsw_server),
 +      INTEL_CPU_FAM6(SANDYBRIDGE,             rapl_defaults_core),
 +      INTEL_CPU_FAM6(SANDYBRIDGE_X,           rapl_defaults_core),
 +
 +      INTEL_CPU_FAM6(IVYBRIDGE,               rapl_defaults_core),
 +      INTEL_CPU_FAM6(IVYBRIDGE_X,             rapl_defaults_core),
 +
 +      INTEL_CPU_FAM6(HASWELL_CORE,            rapl_defaults_core),
 +      INTEL_CPU_FAM6(HASWELL_ULT,             rapl_defaults_core),
 +      INTEL_CPU_FAM6(HASWELL_GT3E,            rapl_defaults_core),
 +      INTEL_CPU_FAM6(HASWELL_X,               rapl_defaults_hsw_server),
 +
 +      INTEL_CPU_FAM6(BROADWELL_CORE,          rapl_defaults_core),
 +      INTEL_CPU_FAM6(BROADWELL_GT3E,          rapl_defaults_core),
 +      INTEL_CPU_FAM6(BROADWELL_XEON_D,        rapl_defaults_core),
 +      INTEL_CPU_FAM6(BROADWELL_X,             rapl_defaults_hsw_server),
 +
 +      INTEL_CPU_FAM6(SKYLAKE_DESKTOP,         rapl_defaults_core),
 +      INTEL_CPU_FAM6(SKYLAKE_MOBILE,          rapl_defaults_core),
 +      INTEL_CPU_FAM6(SKYLAKE_X,               rapl_defaults_hsw_server),
 +      INTEL_CPU_FAM6(KABYLAKE_MOBILE,         rapl_defaults_core),
 +      INTEL_CPU_FAM6(KABYLAKE_DESKTOP,        rapl_defaults_core),
 +      INTEL_CPU_FAM6(CANNONLAKE_MOBILE,       rapl_defaults_core),
 +
-       INTEL_CPU_FAM6(ATOM_SILVERMONT1,        rapl_defaults_byt),
++      INTEL_CPU_FAM6(ATOM_SILVERMONT,         rapl_defaults_byt),
 +      INTEL_CPU_FAM6(ATOM_AIRMONT,            rapl_defaults_cht),
-       INTEL_CPU_FAM6(ATOM_MERRIFIELD,         rapl_defaults_tng),
-       INTEL_CPU_FAM6(ATOM_MOOREFIELD,         rapl_defaults_ann),
++      INTEL_CPU_FAM6(ATOM_SILVERMONT_MID,     rapl_defaults_tng),
++      INTEL_CPU_FAM6(ATOM_AIRMONT_MID,        rapl_defaults_ann),
 +      INTEL_CPU_FAM6(ATOM_GOLDMONT,           rapl_defaults_core),
-       INTEL_CPU_FAM6(ATOM_GEMINI_LAKE,        rapl_defaults_core),
-       INTEL_CPU_FAM6(ATOM_DENVERTON,          rapl_defaults_core),
++      INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS,      rapl_defaults_core),
++      INTEL_CPU_FAM6(ATOM_GOLDMONT_X,         rapl_defaults_core),
 +
 +      INTEL_CPU_FAM6(XEON_PHI_KNL,            rapl_defaults_hsw_server),
 +      INTEL_CPU_FAM6(XEON_PHI_KNM,            rapl_defaults_hsw_server),
        {}
  };
  MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
diff --combined tools/perf/Makefile.perf
index 0be41169537908ce471e8d9fba99a6cd5e5fe263,92514fb3689f0c38121323aa417896742d8748b4..2f3bf025e3050f94252c5ebd0a2ebdd18a5ce819
@@@ -635,7 -635,7 +635,7 @@@ $(LIBPERF_IN): prepare FORC
  $(LIB_FILE): $(LIBPERF_IN)
        $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIBPERF_IN) $(LIB_OBJS)
  
 -LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ)
 +LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) 'EXTRA_CFLAGS=$(EXTRA_CFLAGS)' 'LDFLAGS=$(LDFLAGS)'
  
  $(LIBTRACEEVENT): FORCE
        $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a
@@@ -779,7 -779,9 +779,9 @@@ endi
  ifndef NO_LIBBPF
        $(call QUIET_INSTALL, bpf-headers) \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'; \
-               $(INSTALL) include/bpf/*.h -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'
+               $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf/linux'; \
+               $(INSTALL) include/bpf/*.h -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'; \
+               $(INSTALL) include/bpf/linux/*.h -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf/linux'
        $(call QUIET_INSTALL, bpf-examples) \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'; \
                $(INSTALL) examples/bpf/*.c -t '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
index b2188e623e229e4ad0ef78e9ad130e81e9cd4609,c0703979c51d086aca2c5b5f4bb3e964e22bd856..257c9c18cb7e6ffd825133e600abbec55356662f
@@@ -201,14 -201,13 +201,13 @@@ static void setup_forced_leader(struct 
                perf_evlist__force_leader(evlist);
  }
  
- static int process_feature_event(struct perf_tool *tool,
-                                union perf_event *event,
-                                struct perf_session *session __maybe_unused)
+ static int process_feature_event(struct perf_session *session,
+                                union perf_event *event)
  {
-       struct report *rep = container_of(tool, struct report, tool);
+       struct report *rep = container_of(session->tool, struct report, tool);
  
        if (event->feat.feat_id < HEADER_LAST_FEATURE)
-               return perf_event__process_feature(tool, event, session);
+               return perf_event__process_feature(session, event);
  
        if (event->feat.feat_id != HEADER_LAST_FEATURE) {
                pr_err("failed: wrong feature ID: %" PRIu64 "\n",
@@@ -981,7 -980,6 +980,7 @@@ int cmd_report(int argc, const char **a
                        .id_index        = perf_event__process_id_index,
                        .auxtrace_info   = perf_event__process_auxtrace_info,
                        .auxtrace        = perf_event__process_auxtrace,
 +                      .event_update    = perf_event__process_event_update,
                        .feature         = process_feature_event,
                        .ordered_events  = true,
                        .ordering_requires_timestamps = true,
        OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
                     "how to display percentage of filtered entries", parse_filter_percentage),
        OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
-                           "Instruction Tracing options",
+                           "Instruction Tracing options\n" ITRACE_HELP,
                            itrace_parse_synth_opts),
        OPT_BOOLEAN(0, "full-source-path", &srcline_full_filename,
                        "Show full source file name path for source lines"),
diff --combined tools/perf/util/evsel.c
index e596ae358c4d423fcc49f0512c5123ae2a35f3fd,cb7f010599401c2eb455944243a5131bede26770..29d7b97f66fbc5ae8efe67d3880263711d0a4d1a
@@@ -1089,9 -1089,6 +1089,9 @@@ void perf_evsel__config(struct perf_evs
                attr->exclude_user   = 1;
        }
  
 +      if (evsel->own_cpus)
 +              evsel->attr.read_format |= PERF_FORMAT_ID;
 +
        /*
         * Apply event specific term settings,
         * it overloads any global configuration.
@@@ -2685,7 -2682,7 +2685,7 @@@ int perf_event__synthesize_sample(unio
        return 0;
  }
  
- struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
+ struct tep_format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
  {
        return tep_find_field(evsel->tp_format, name);
  }
  void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
                         const char *name)
  {
-       struct format_field *field = perf_evsel__field(evsel, name);
+       struct tep_format_field *field = perf_evsel__field(evsel, name);
        int offset;
  
        if (!field)
  
        offset = field->offset;
  
-       if (field->flags & FIELD_IS_DYNAMIC) {
+       if (field->flags & TEP_FIELD_IS_DYNAMIC) {
                offset = *(int *)(sample->raw_data + field->offset);
                offset &= 0xffff;
        }
        return sample->raw_data + offset;
  }
  
- u64 format_field__intval(struct format_field *field, struct perf_sample *sample,
+ u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample,
                         bool needs_swap)
  {
        u64 value;
  u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
                       const char *name)
  {
-       struct format_field *field = perf_evsel__field(evsel, name);
+       struct tep_format_field *field = perf_evsel__field(evsel, name);
  
        if (!field)
                return 0;
@@@ -2943,3 -2940,32 +2943,32 @@@ struct perf_env *perf_evsel__env(struc
                return evsel->evlist->env;
        return NULL;
  }
+ static int store_evsel_ids(struct perf_evsel *evsel, struct perf_evlist *evlist)
+ {
+       int cpu, thread;
+       for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
+               for (thread = 0; thread < xyarray__max_y(evsel->fd);
+                    thread++) {
+                       int fd = FD(evsel, cpu, thread);
+                       if (perf_evlist__id_add_fd(evlist, evsel,
+                                                  cpu, thread, fd) < 0)
+                               return -1;
+               }
+       }
+       return 0;
+ }
+ int perf_evsel__store_ids(struct perf_evsel *evsel, struct perf_evlist *evlist)
+ {
+       struct cpu_map *cpus = evsel->cpus;
+       struct thread_map *threads = evsel->threads;
+       if (perf_evsel__alloc_id(evsel, cpus->nr, threads->nr))
+               return -ENOMEM;
+       return store_evsel_ids(evsel, evlist);
+ }