1 // SPDX-License-Identifier: GPL-2.0
3 * Resource Director Technology (RDT)
5 * Pseudo-locking support built on top of Cache Allocation Technology (CAT)
7 * Copyright (C) 2018 Intel Corporation
9 * Author: Reinette Chatre <reinette.chatre@intel.com>
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/cacheinfo.h>
15 #include <linux/cpu.h>
16 #include <linux/cpumask.h>
17 #include <linux/debugfs.h>
18 #include <linux/kthread.h>
19 #include <linux/mman.h>
20 #include <linux/perf_event.h>
21 #include <linux/pm_qos.h>
22 #include <linux/slab.h>
23 #include <linux/uaccess.h>
25 #include <asm/cacheflush.h>
26 #include <asm/intel-family.h>
27 #include <asm/resctrl.h>
28 #include <asm/perf_event.h>
30 #include "../../events/perf_event.h" /* For X86_CONFIG() */
33 #define CREATE_TRACE_POINTS
34 #include "pseudo_lock_event.h"
37 * The bits needed to disable hardware prefetching vary based on the
38 * platform. During initialization we will discover which bits to use.
40 static u64 prefetch_disable_bits;
43 * Major number assigned to and shared by all devices exposing
44 * pseudo-locked regions.
46 static unsigned int pseudo_lock_major;
47 static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0);
49 static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode)
51 const struct rdtgroup *rdtgrp;
53 rdtgrp = dev_get_drvdata(dev);
56 return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name);
59 static const struct class pseudo_lock_class = {
60 .name = "pseudo_lock",
61 .devnode = pseudo_lock_devnode,
65 * get_prefetch_disable_bits - prefetch disable bits of supported platforms
68 * Capture the list of platforms that have been validated to support
69 * pseudo-locking. This includes testing to ensure pseudo-locked regions
70 * with low cache miss rates can be created under a variety of load conditions
71 * and that these pseudo-locked regions can maintain their low cache
72 * miss rates under a variety of load conditions for significant lengths of time.
74 * After a platform has been validated to support pseudo-locking, its
75 * hardware prefetch disable bits are included here as they are documented
78 * When adding a platform here also add support for its cache events to
79 * measure_l2_residency() and measure_l3_residency().
82 * If the platform is supported, the bits to disable hardware prefetchers; 0
83 * if the platform is not supported.
85 static u64 get_prefetch_disable_bits(void)
87 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
88 boot_cpu_data.x86 != 6)
91 switch (boot_cpu_data.x86_model) {
92 case INTEL_FAM6_BROADWELL_X:
94 * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
96 * 0 L2 Hardware Prefetcher Disable (R/W)
97 * 1 L2 Adjacent Cache Line Prefetcher Disable (R/W)
98 * 2 DCU Hardware Prefetcher Disable (R/W)
99 * 3 DCU IP Prefetcher Disable (R/W)
103 case INTEL_FAM6_ATOM_GOLDMONT:
104 case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
106 * SDM defines bits of MSR_MISC_FEATURE_CONTROL register
108 * 0 L2 Hardware Prefetcher Disable (R/W)
110 * 2 DCU Hardware Prefetcher Disable (R/W)
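 *
 * Example (illustrative, derived from the bit layouts above): disabling
 * every prefetcher listed for Broadwell-X corresponds to a mask of 0xf
 * (bits 0-3), while the Goldmont variants use 0x5 (bits 0 and 2).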
120 * pseudo_lock_minor_get - Obtain available minor number
121 * @minor: Pointer to where new minor number will be stored
123 * A bitmask is used to track available minor numbers. Here the next free
124 * minor number is marked as unavailable and returned.
126 * Return: 0 on success, <0 on failure.
128 static int pseudo_lock_minor_get(unsigned int *minor)
130 unsigned long first_bit;
132 first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS);
134 if (first_bit == MINORBITS)
137 __clear_bit(first_bit, &pseudo_lock_minor_avail);
144 * pseudo_lock_minor_release - Return minor number to available pool
145 * @minor: The minor number made available
147 static void pseudo_lock_minor_release(unsigned int minor)
149 __set_bit(minor, &pseudo_lock_minor_avail);
153 * region_find_by_minor - Locate a pseudo-lock region by inode minor number
154 * @minor: The minor number of the device representing pseudo-locked region
156 * When the character device is accessed we need to determine which
157 * pseudo-locked region it belongs to. This is done by matching the minor
158 * number of the device to the pseudo-locked region to which it belongs.
160 * Minor numbers are assigned at the time a pseudo-locked region is associated
161 * with a cache instance.
163 * Return: On success return pointer to resource group owning the pseudo-locked
164 * region, NULL on failure.
166 static struct rdtgroup *region_find_by_minor(unsigned int minor)
168 struct rdtgroup *rdtgrp, *rdtgrp_match = NULL;
170 list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
171 if (rdtgrp->plr && rdtgrp->plr->minor == minor) {
172 rdtgrp_match = rdtgrp;
180 * struct pseudo_lock_pm_req - A power management QoS request list entry
181 * @list: Entry within the @pm_reqs list for a pseudo-locked region
182 * @req: PM QoS request
184 struct pseudo_lock_pm_req {
185 struct list_head list;
186 struct dev_pm_qos_request req;
189 static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
191 struct pseudo_lock_pm_req *pm_req, *next;
193 list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) {
194 dev_pm_qos_remove_request(&pm_req->req);
195 list_del(&pm_req->list);
201 * pseudo_lock_cstates_constrain - Restrict cores from entering C6
202 * @plr: Pseudo-locked region
204 * To prevent the cache from being affected by power management, entering
205 * C6 has to be avoided. This is accomplished by requesting a latency
206 * requirement lower than the lowest C6 exit latency of all supported
207 * platforms as found in the cpuidle state tables in the intel_idle driver.
208 * At this time it is possible to do so with a single latency requirement
209 * for all supported platforms.
211 * Since Goldmont is supported, which is affected by X86_BUG_MONITOR,
212 * the ACPI latencies need to be considered while keeping in mind that C2
213 * may be set to map to deeper sleep states. In this case the latency
214 * requirement needs to prevent entering C2 also.
216 * Return: 0 on success, <0 on failure
218 static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
220 struct pseudo_lock_pm_req *pm_req;
224 for_each_cpu(cpu, &plr->d->cpu_mask) {
225 pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
227 rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n");
231 ret = dev_pm_qos_add_request(get_cpu_device(cpu),
233 DEV_PM_QOS_RESUME_LATENCY,
236 rdt_last_cmd_printf("Failed to add latency req CPU%d\n",
242 list_add(&pm_req->list, &plr->pm_reqs);
248 pseudo_lock_cstates_relax(plr);
253 * pseudo_lock_region_clear - Reset pseudo-lock region data
254 * @plr: pseudo-lock region
256 * All content of the pseudo-locked region is reset - any memory allocated
261 static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
272 plr->debugfs_dir = NULL;
276 * pseudo_lock_region_init - Initialize pseudo-lock region information
277 * @plr: pseudo-lock region
279 * Called after the user provided a schemata to be pseudo-locked. On entry
280 * the &struct pseudo_lock_region has already been initialized from the
281 * schemata with the resource, domain, and capacity bitmask. Here the information
282 * required for pseudo-locking is deduced from this data and &struct
283 * pseudo_lock_region initialized further. This information includes:
284 * - size in bytes of the region to be pseudo-locked
285 * - cache line size to know the stride with which data needs to be accessed
286 * to be pseudo-locked
287 * - a cpu associated with the cache instance on which the pseudo-locking
288 * flow can be executed
290 * Return: 0 on success, <0 on failure. Descriptive error will be written
291 * to last_cmd_status buffer.
293 static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
295 struct cpu_cacheinfo *ci;
299 /* Pick the first cpu we find that is associated with the cache. */
300 plr->cpu = cpumask_first(&plr->d->cpu_mask);
302 if (!cpu_online(plr->cpu)) {
303 rdt_last_cmd_printf("CPU %u associated with cache not online\n",
309 ci = get_cpu_cacheinfo(plr->cpu);
311 plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
313 for (i = 0; i < ci->num_leaves; i++) {
314 if (ci->info_list[i].level == plr->s->res->cache_level) {
315 plr->line_size = ci->info_list[i].coherency_line_size;
321 rdt_last_cmd_puts("Unable to determine cache line size\n");
323 pseudo_lock_region_clear(plr);
328 * pseudo_lock_init - Initialize a pseudo-lock region
329 * @rdtgrp: resource group to which new pseudo-locked region will belong
331 * A pseudo-locked region is associated with a resource group. When this
332 * association is created the pseudo-locked region is initialized. The
333 * details of the pseudo-locked region are not known at this time so only
334 * allocation is done and association established.
336 * Return: 0 on success, <0 on failure
338 static int pseudo_lock_init(struct rdtgroup *rdtgrp)
340 struct pseudo_lock_region *plr;
342 plr = kzalloc(sizeof(*plr), GFP_KERNEL);
346 init_waitqueue_head(&plr->lock_thread_wq);
347 INIT_LIST_HEAD(&plr->pm_reqs);
353 * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked
354 * @plr: pseudo-lock region
356 * Initialize the details required to set up the pseudo-locked region and
357 * allocate the contiguous memory that will be pseudo-locked to the cache.
359 * Return: 0 on success, <0 on failure. Descriptive error will be written
360 * to last_cmd_status buffer.
362 static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
366 ret = pseudo_lock_region_init(plr);
371 * We do not yet support contiguous regions larger than
374 if (plr->size > KMALLOC_MAX_SIZE) {
375 rdt_last_cmd_puts("Requested region exceeds maximum size\n");
380 plr->kmem = kzalloc(plr->size, GFP_KERNEL);
382 rdt_last_cmd_puts("Unable to allocate memory\n");
390 pseudo_lock_region_clear(plr);
396 * pseudo_lock_free - Free a pseudo-locked region
397 * @rdtgrp: resource group to which pseudo-locked region belonged
399 * The pseudo-locked region's resources have already been released, or not
400 * yet created at this point. Now it can be freed and disassociated from the
405 static void pseudo_lock_free(struct rdtgroup *rdtgrp)
407 pseudo_lock_region_clear(rdtgrp->plr);
413 * pseudo_lock_fn - Load kernel memory into cache
414 * @_rdtgrp: resource group to which pseudo-lock region belongs
416 * This is the core pseudo-locking flow.
418 * First we ensure that the kernel memory cannot be found in the cache.
419 * Then, while taking care that there will be as little interference as
420 * possible, the memory to be loaded is accessed while the core is running
421 * with class of service set to the bitmask of the pseudo-locked region.
422 * After this is complete no future CAT allocations will be allowed to
423 * overlap with this bitmask.
425 * Local register variables are utilized to ensure that the memory region
426 * to be locked is the only memory access made during the critical locking
429 * Return: 0. Waiter on waitqueue will be woken on completion.
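 *
 * In outline the steps implemented below are: flush the candidate memory
 * from the cache with wbinvd, disable interrupts, save and set
 * MSR_MISC_FEATURE_CONTROL to disable the hardware prefetchers, switch
 * MSR_IA32_PQR_ASSOC to the CLOSID of the pseudo-locked region, stride
 * through plr->kmem to pull it into the reserved cache portion, then
 * restore PQR_ASSOC and the prefetcher MSR and re-enable interrupts.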
431 static int pseudo_lock_fn(void *_rdtgrp)
433 struct rdtgroup *rdtgrp = _rdtgrp;
434 struct pseudo_lock_region *plr = rdtgrp->plr;
435 u32 rmid_p, closid_p;
440 * The registers used for local register variables are also used
441 * when KASAN is active. When KASAN is active we use a regular
442 * variable to ensure we always use a valid pointer, but the cost
443 * is that this variable will enter the cache through evicting the
444 * memory we are trying to lock into the cache. Thus expect lower
445 * pseudo-locking success rate when KASAN is active.
447 unsigned int line_size;
451 register unsigned int line_size asm("esi");
452 register unsigned int size asm("edi");
453 register void *mem_r asm(_ASM_BX);
454 #endif /* CONFIG_KASAN */
457 * Make sure none of the allocated memory is cached. If it is we
458 * will get a cache hit in the loop below from outside of the pseudo-locked
460 * wbinvd (as opposed to clflush/clflushopt) is required to
461 * increase the likelihood that the allocated cache portion will be filled
462 * with the associated memory.
467 * Always called with interrupts enabled. By disabling interrupts
468 * we ensure that we will not be preempted during this critical section.
473 * Call wrmsr and rdmsr as directly as possible to avoid tracing
474 * clobbering local register variables or affecting cache accesses.
476 * Disable the hardware prefetcher so that when the end of the memory
477 * being pseudo-locked is reached the hardware will not read beyond
478 * the buffer and evict pseudo-locked memory read earlier from the
481 saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL);
482 __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
483 closid_p = this_cpu_read(pqr_state.cur_closid);
484 rmid_p = this_cpu_read(pqr_state.cur_rmid);
487 line_size = plr->line_size;
489 * Critical section begin: start by writing the closid associated
490 * with the capacity bitmask of the cache region being
491 * pseudo-locked followed by reading of kernel memory to load it
494 __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, rdtgrp->closid);
496 * Cache was flushed earlier. Now access kernel memory to read it
497 * into cache region associated with just activated plr->closid.
498 * Loop over data twice:
499 * - In first loop the cache region is shared with the page walker
500 * as it populates the paging structure caches (including TLB).
501 * - In the second loop the paging structure caches are used and
502 * cache region is populated with the memory being referenced.
504 for (i = 0; i < size; i += PAGE_SIZE) {
506 * Add a barrier to prevent speculative execution of this
507 * loop reading beyond the end of the buffer.
510 asm volatile("mov (%0,%1,1), %%eax\n\t"
512 : "r" (mem_r), "r" (i)
515 for (i = 0; i < size; i += line_size) {
517 * Add a barrier to prevent speculative execution of this
518 * loop reading beyond the end of the buffer.
521 asm volatile("mov (%0,%1,1), %%eax\n\t"
523 : "r" (mem_r), "r" (i)
527 * Critical section end: restore closid with capacity bitmask that
528 * does not overlap with pseudo-locked region.
530 __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, closid_p);
532 /* Re-enable the hardware prefetcher(s) */
533 wrmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr);
536 plr->thread_done = 1;
537 wake_up_interruptible(&plr->lock_thread_wq);
542 * rdtgroup_monitor_in_progress - Test if monitoring in progress
543 * @rdtgrp: resource group being queried
545 * Return: 1 if monitor groups have been created for this resource
546 * group, 0 otherwise.
548 static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp)
550 return !list_empty(&rdtgrp->mon.crdtgrp_list);
554 * rdtgroup_locksetup_user_restrict - Restrict user access to group
555 * @rdtgrp: resource group needing access restricted
557 * A resource group used for cache pseudo-locking cannot have cpus or tasks
558 * assigned to it. This is communicated to the user by restricting access
559 * to all the files that can be used to make such changes.
561 * Permissions restored with rdtgroup_locksetup_user_restore()
563 * Return: 0 on success, <0 on failure. If a failure occurs during the
564 * restriction of access an attempt will be made to restore permissions but
565 * the state of the mode of these files will be uncertain when a failure occurs.
568 static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp)
572 ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
576 ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
580 ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
584 if (resctrl_arch_mon_capable()) {
585 ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups");
594 rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
596 rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
598 rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
604 * rdtgroup_locksetup_user_restore - Restore user access to group
605 * @rdtgrp: resource group needing access restored
607 * Restore all file access previously removed using
608 * rdtgroup_locksetup_user_restrict()
610 * Return: 0 on success, <0 on failure. If a failure occurs during the
611 * restoration of access an attempt will be made to restrict permissions
612 * again but the state of the mode of these files will be uncertain when a failure occurs.
615 static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp)
619 ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);
623 ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777);
627 ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777);
631 if (resctrl_arch_mon_capable()) {
632 ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777);
641 rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list");
643 rdtgroup_kn_mode_restrict(rdtgrp, "cpus");
645 rdtgroup_kn_mode_restrict(rdtgrp, "tasks");
651 * rdtgroup_locksetup_enter - Resource group enters locksetup mode
652 * @rdtgrp: resource group requested to enter locksetup mode
654 * A resource group enters locksetup mode to reflect that it would be used
655 * to represent a pseudo-locked region and is in the process of being set
656 * up to do so. A resource group used for a pseudo-locked region would
657 * lose the closid associated with it so we cannot allow it to have any
658 * tasks or cpus assigned nor permit tasks or cpus to be assigned in the
659 * future. Monitoring of a pseudo-locked region is not allowed either.
661 * The above and more restrictions on a pseudo-locked region are checked
662 * for and enforced before the resource group enters the locksetup mode.
664 * Returns: 0 if the resource group successfully entered locksetup mode, <0
665 * on failure. On failure the last_cmd_status buffer is updated with text to
666 * communicate details of failure to the user.
668 int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
673 * The default resource group can neither be removed nor lose the
674 * default closid associated with it.
676 if (rdtgrp == &rdtgroup_default) {
677 rdt_last_cmd_puts("Cannot pseudo-lock default group\n");
682 * Cache Pseudo-locking not supported when CDP is enabled.
684 * Some things to consider if you would like to enable this
685 * support (using L3 CDP as example):
686 * - When CDP is enabled two separate resources are exposed,
687 * L3DATA and L3CODE, but they are actually on the same cache.
688 * The implication for pseudo-locking is that if a
689 * pseudo-locked region is created on a domain of one
690 * resource (eg. L3CODE), then a pseudo-locked region cannot
691 * be created on that same domain of the other resource
692 * (eg. L3DATA). This is because the creation of a
693 * pseudo-locked region involves a call to wbinvd that will
694 * affect all cache allocations on the particular domain.
695 * - Considering the previous, it may be possible to only
696 * expose one of the CDP resources to pseudo-locking and
697 * hide the other. For example, we could consider only
698 * exposing L3DATA and, since the L3 cache is unified, it is
699 * still possible to place instructions there and execute them.
700 * - If only one region is exposed to pseudo-locking we should
701 * still keep in mind that availability of a portion of cache
702 * for pseudo-locking should take into account both resources.
703 * Similarly, if a pseudo-locked region is created in one
704 * resource, the portion of cache used by it should be made
705 * unavailable to all future allocations from both resources.
707 if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3) ||
708 resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) {
709 rdt_last_cmd_puts("CDP enabled\n");
714 * Not knowing the bits to disable prefetching implies that this
715 * platform does not support Cache Pseudo-Locking.
717 prefetch_disable_bits = get_prefetch_disable_bits();
718 if (prefetch_disable_bits == 0) {
719 rdt_last_cmd_puts("Pseudo-locking not supported\n");
723 if (rdtgroup_monitor_in_progress(rdtgrp)) {
724 rdt_last_cmd_puts("Monitoring in progress\n");
728 if (rdtgroup_tasks_assigned(rdtgrp)) {
729 rdt_last_cmd_puts("Tasks assigned to resource group\n");
733 if (!cpumask_empty(&rdtgrp->cpu_mask)) {
734 rdt_last_cmd_puts("CPUs assigned to resource group\n");
738 if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
739 rdt_last_cmd_puts("Unable to modify resctrl permissions\n");
743 ret = pseudo_lock_init(rdtgrp);
745 rdt_last_cmd_puts("Unable to init pseudo-lock region\n");
750 * If this system is capable of monitoring, an rmid would have been
751 * allocated when the control group was created. It is no longer
752 * needed once this group is used for pseudo-locking. This
753 * is safe to call on platforms not capable of monitoring.
755 free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
761 rdtgroup_locksetup_user_restore(rdtgrp);
767 * rdtgroup_locksetup_exit - resource group exits locksetup mode
768 * @rdtgrp: resource group
770 * When a resource group exits locksetup mode the earlier restrictions are
773 * Return: 0 on success, <0 on failure
775 int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
779 if (resctrl_arch_mon_capable()) {
780 ret = alloc_rmid(rdtgrp->closid);
782 rdt_last_cmd_puts("Out of RMIDs\n");
785 rdtgrp->mon.rmid = ret;
788 ret = rdtgroup_locksetup_user_restore(rdtgrp);
790 free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
794 pseudo_lock_free(rdtgrp);
799 * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
803 * @d represents a cache instance and @cbm a capacity bitmask that is
804 * considered for it. Determine if @cbm overlaps with any existing
805 * pseudo-locked region on @d.
807 * @cbm is unsigned long, even if only 32 bits are used, to make the
808 * bitmap functions work correctly.
810 * Return: true if @cbm overlaps with pseudo-locked region on @d, false otherwise.
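 *
 * Example (illustrative): if the pseudo-locked region on @d uses CBM 0x0f00,
 * a @cbm of 0x00f0 does not intersect it (returns false) while a @cbm of
 * 0x0ff0 does (returns true).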
813 bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm)
815 unsigned int cbm_len;
819 cbm_len = d->plr->s->res->cache.cbm_len;
821 if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
828 * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy
829 * @d: RDT domain under test
831 * The setup of a pseudo-locked region affects all cache instances within
832 * the hierarchy of the region. It is thus essential to know if any
833 * pseudo-locked regions exist within a cache hierarchy to prevent any
834 * attempts to create new pseudo-locked regions in the same hierarchy.
836 * Return: true if a pseudo-locked region exists in the hierarchy of @d or
837 * if it is not possible to test due to a memory allocation issue,
840 bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d)
842 cpumask_var_t cpu_with_psl;
843 struct rdt_resource *r;
844 struct rdt_domain *d_i;
847 /* Walking r->domains, ensure it can't race with cpuhp */
848 lockdep_assert_cpus_held();
850 if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL))
854 * First determine which cpus have pseudo-locked regions
855 * associated with them.
857 for_each_alloc_capable_rdt_resource(r) {
858 list_for_each_entry(d_i, &r->domains, list) {
860 cpumask_or(cpu_with_psl, cpu_with_psl,
866 * Next test if new pseudo-locked region would intersect with
869 if (cpumask_intersects(&d->cpu_mask, cpu_with_psl))
872 free_cpumask_var(cpu_with_psl);
877 * measure_cycles_lat_fn - Measure cycle latency to read pseudo-locked memory
878 * @_plr: pseudo-lock region to measure
880 * There is no deterministic way to test if a memory region is cached. One
881 * way is to measure how long it takes to read the memory; the speed of
882 * access is a good indication of how close to the cpu the data is. Even
883 * more, if the prefetcher is disabled and the memory is read at a stride
884 * of half the cache line, then a cache miss will be easy to spot since the
885 * read of the first half would be significantly slower than the read of the second half.
888 * Return: 0. Waiter on waitqueue will be woken on completion.
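 *
 * For example, with a 64 byte cache line and a 32 byte stride, every second
 * read falls in a line already brought in by the previous read, so on an
 * uncached buffer the first read of each line stands out as markedly slower
 * than the read of its second half.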
890 static int measure_cycles_lat_fn(void *_plr)
892 struct pseudo_lock_region *plr = _plr;
893 u32 saved_low, saved_high;
900 * Disable hardware prefetchers.
902 rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
903 wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
904 mem_r = READ_ONCE(plr->kmem);
906 * Perform a dummy run of the time measurement to load the needed
907 * instructions into the L1 instruction cache.
909 start = rdtsc_ordered();
910 for (i = 0; i < plr->size; i += 32) {
911 start = rdtsc_ordered();
912 asm volatile("mov (%0,%1,1), %%eax\n\t"
914 : "r" (mem_r), "r" (i)
916 end = rdtsc_ordered();
917 trace_pseudo_lock_mem_latency((u32)(end - start));
919 wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
921 plr->thread_done = 1;
922 wake_up_interruptible(&plr->lock_thread_wq);
927 * Create a perf_event_attr for the hit and miss perf events that will
928 * be used during the performance measurement. A perf_event maintains
929 * a pointer to its perf_event_attr so a unique attribute structure is
930 * created for each perf_event.
932 * The actual configuration of the event is set right before use in order
933 * to use the X86_CONFIG macro.
935 static struct perf_event_attr perf_miss_attr = {
936 .type = PERF_TYPE_RAW,
937 .size = sizeof(struct perf_event_attr),
943 static struct perf_event_attr perf_hit_attr = {
944 .type = PERF_TYPE_RAW,
945 .size = sizeof(struct perf_event_attr),
951 struct residency_counts {
952 u64 miss_before, hits_before;
953 u64 miss_after, hits_after;
956 static int measure_residency_fn(struct perf_event_attr *miss_attr,
957 struct perf_event_attr *hit_attr,
958 struct pseudo_lock_region *plr,
959 struct residency_counts *counts)
961 u64 hits_before = 0, hits_after = 0, miss_before = 0, miss_after = 0;
962 struct perf_event *miss_event, *hit_event;
963 int hit_pmcnum, miss_pmcnum;
964 u32 saved_low, saved_high;
965 unsigned int line_size;
971 miss_event = perf_event_create_kernel_counter(miss_attr, plr->cpu,
973 if (IS_ERR(miss_event))
976 hit_event = perf_event_create_kernel_counter(hit_attr, plr->cpu,
978 if (IS_ERR(hit_event))
983 * Check any possible error state of events used by performing
986 if (perf_event_read_local(miss_event, &tmp, NULL, NULL)) {
990 if (perf_event_read_local(hit_event, &tmp, NULL, NULL)) {
996 * Disable hardware prefetchers.
998 rdmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
999 wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
1001 /* Initialize rest of local variables */
1003 * Performance event has been validated right before this with
1004 * interrupts disabled - it is thus safe to read the counter index.
1006 miss_pmcnum = x86_perf_rdpmc_index(miss_event);
1007 hit_pmcnum = x86_perf_rdpmc_index(hit_event);
1008 line_size = READ_ONCE(plr->line_size);
1009 mem_r = READ_ONCE(plr->kmem);
1010 size = READ_ONCE(plr->size);
1013 * Read counter variables twice - first to load the instructions
1014 * used in the L1 cache, second to capture an accurate value that does not
1015 * include cache misses incurred because of instruction loads.
1017 rdpmcl(hit_pmcnum, hits_before);
1018 rdpmcl(miss_pmcnum, miss_before);
1020 * From SDM: Performing back-to-back fast reads is not guaranteed
1022 * Use LFENCE to ensure all previous instructions are retired
1023 * before proceeding.
1026 rdpmcl(hit_pmcnum, hits_before);
1027 rdpmcl(miss_pmcnum, miss_before);
1029 * Use LFENCE to ensure all previous instructions are retired
1030 * before proceeding.
1033 for (i = 0; i < size; i += line_size) {
1035 * Add a barrier to prevent speculative execution of this
1036 * loop reading beyond the end of the buffer.
1039 asm volatile("mov (%0,%1,1), %%eax\n\t"
1041 : "r" (mem_r), "r" (i)
1042 : "%eax", "memory");
1045 * Use LFENCE to ensure all previous instructions are retired
1046 * before proceeding.
1049 rdpmcl(hit_pmcnum, hits_after);
1050 rdpmcl(miss_pmcnum, miss_after);
1052 * Use LFENCE to ensure all previous instructions are retired
1053 * before proceeding.
1056 /* Re-enable hardware prefetchers */
1057 wrmsr(MSR_MISC_FEATURE_CONTROL, saved_low, saved_high);
1060 perf_event_release_kernel(hit_event);
1062 perf_event_release_kernel(miss_event);
1065 * All counts will be zero on failure.
1067 counts->miss_before = miss_before;
1068 counts->hits_before = hits_before;
1069 counts->miss_after = miss_after;
1070 counts->hits_after = hits_after;
1074 static int measure_l2_residency(void *_plr)
1076 struct pseudo_lock_region *plr = _plr;
1077 struct residency_counts counts = {0};
1080 * Non-architectural event for the Goldmont Microarchitecture
1081 * from Intel x86 Architecture Software Developer Manual (SDM):
1082 * MEM_LOAD_UOPS_RETIRED D1H (event number)
1087 switch (boot_cpu_data.x86_model) {
1088 case INTEL_FAM6_ATOM_GOLDMONT:
1089 case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
1090 perf_miss_attr.config = X86_CONFIG(.event = 0xd1,
1092 perf_hit_attr.config = X86_CONFIG(.event = 0xd1,
1099 measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts);
1101 * If a failure prevented the measurements from succeeding
1102 * tracepoints will still be written and all counts will be zero.
1104 trace_pseudo_lock_l2(counts.hits_after - counts.hits_before,
1105 counts.miss_after - counts.miss_before);
1107 plr->thread_done = 1;
1108 wake_up_interruptible(&plr->lock_thread_wq);
1112 static int measure_l3_residency(void *_plr)
1114 struct pseudo_lock_region *plr = _plr;
1115 struct residency_counts counts = {0};
1118 * On Broadwell Microarchitecture the MEM_LOAD_UOPS_RETIRED event
1119 * has two "no fix" errata associated with it: BDM35 and BDM100. On
1120 * this platform the following events are used instead:
1121 * LONGEST_LAT_CACHE 2EH (Documented in SDM)
1126 switch (boot_cpu_data.x86_model) {
1127 case INTEL_FAM6_BROADWELL_X:
1128 /* On BDW the hit event counts references, not hits */
1129 perf_hit_attr.config = X86_CONFIG(.event = 0x2e,
1131 perf_miss_attr.config = X86_CONFIG(.event = 0x2e,
1138 measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts);
1140 * If a failure prevented the measurements from succeeding
1141 * tracepoints will still be written and all counts will be zero.
1144 counts.miss_after -= counts.miss_before;
1145 if (boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_X) {
1147 * On BDW references and misses are counted; we need to adjust.
1148 * Sometimes the "hits" counter is a bit more than the
1149 * references, for example, x references but x + 1 hits.
1150 * To not report invalid hit values in this case we treat
1151 * that as misses equal to references.
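 * For example (illustrative): 100 references and 3 misses yield 97
 * reported hits, while a skewed reading of 100 references but 102
 * "misses" is clamped so that 0 hits are reported instead of an
 * invalid negative value.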
1153 /* First compute the number of cache references measured */
1154 counts.hits_after -= counts.hits_before;
1155 /* Next convert references to cache hits */
1156 counts.hits_after -= min(counts.miss_after, counts.hits_after);
1158 counts.hits_after -= counts.hits_before;
1161 trace_pseudo_lock_l3(counts.hits_after, counts.miss_after);
1163 plr->thread_done = 1;
1164 wake_up_interruptible(&plr->lock_thread_wq);
1169 * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region
1170 * @rdtgrp: Resource group to which the pseudo-locked region belongs.
1171 * @sel: Selector of which measurement to perform on a pseudo-locked region.
1173 * The measurement of latency to access a pseudo-locked region should be
1174 * done from a cpu that is associated with that pseudo-locked region.
1175 * Determine which cpu is associated with this region and start a thread on
1176 * that cpu to perform the measurement, then wait for that thread to complete.
1178 * Return: 0 on success, <0 on failure
1180 static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
1182 struct pseudo_lock_region *plr = rdtgrp->plr;
1183 struct task_struct *thread;
1188 mutex_lock(&rdtgroup_mutex);
1190 if (rdtgrp->flags & RDT_DELETED) {
1200 plr->thread_done = 0;
1201 cpu = cpumask_first(&plr->d->cpu_mask);
1202 if (!cpu_online(cpu)) {
1210 thread = kthread_create_on_node(measure_cycles_lat_fn, plr,
1212 "pseudo_lock_measure/%u",
1215 thread = kthread_create_on_node(measure_l2_residency, plr,
1217 "pseudo_lock_measure/%u",
1220 thread = kthread_create_on_node(measure_l3_residency, plr,
1222 "pseudo_lock_measure/%u",
1227 if (IS_ERR(thread)) {
1228 ret = PTR_ERR(thread);
1231 kthread_bind(thread, cpu);
1232 wake_up_process(thread);
1234 ret = wait_event_interruptible(plr->lock_thread_wq,
1235 plr->thread_done == 1);
1242 mutex_unlock(&rdtgroup_mutex);
1247 static ssize_t pseudo_lock_measure_trigger(struct file *file,
1248 const char __user *user_buf,
1249 size_t count, loff_t *ppos)
1251 struct rdtgroup *rdtgrp = file->private_data;
1257 buf_size = min(count, (sizeof(buf) - 1));
1258 if (copy_from_user(buf, user_buf, buf_size))
1261 buf[buf_size] = '\0';
1262 ret = kstrtoint(buf, 10, &sel);
1264 if (sel != 1 && sel != 2 && sel != 3)
1266 ret = debugfs_file_get(file->f_path.dentry);
1269 ret = pseudo_lock_measure_cycles(rdtgrp, sel);
1272 debugfs_file_put(file->f_path.dentry);
1278 static const struct file_operations pseudo_measure_fops = {
1279 .write = pseudo_lock_measure_trigger,
1280 .open = simple_open,
1281 .llseek = default_llseek,
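/*
 * Userspace triggers a measurement by writing a selector to the
 * "pseudo_lock_measure" debugfs file created in rdtgroup_pseudo_lock_create(),
 * for example (path assuming the default debugfs mount point):
 *
 *   echo 1 > /sys/kernel/debug/resctrl/<group>/pseudo_lock_measure
 *
 * where 1 selects the latency measurement, 2 the L2 residency measurement and
 * 3 the L3 residency measurement.
 */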
1285 * rdtgroup_pseudo_lock_create - Create a pseudo-locked region
1286 * @rdtgrp: resource group to which pseudo-lock region belongs
1288 * Called when a resource group in the pseudo-locksetup mode receives a
1289 * valid schemata that should be pseudo-locked. Since the resource group is
1290 * in pseudo-locksetup mode the &struct pseudo_lock_region has already been
1291 * allocated and initialized with the essential information. If a failure
1292 * occurs the resource group remains in the pseudo-locksetup mode with the
1293 * &struct pseudo_lock_region associated with it, but cleared of all
1294 * information and ready for the user to re-attempt pseudo-locking by
1295 * writing the schemata again.
1297 * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0
1298 * on failure. Descriptive error will be written to last_cmd_status buffer.
1300 int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
1302 struct pseudo_lock_region *plr = rdtgrp->plr;
1303 struct task_struct *thread;
1304 unsigned int new_minor;
1308 ret = pseudo_lock_region_alloc(plr);
1312 ret = pseudo_lock_cstates_constrain(plr);
1318 plr->thread_done = 0;
1320 thread = kthread_create_on_node(pseudo_lock_fn, rdtgrp,
1321 cpu_to_node(plr->cpu),
1322 "pseudo_lock/%u", plr->cpu);
1323 if (IS_ERR(thread)) {
1324 ret = PTR_ERR(thread);
1325 rdt_last_cmd_printf("Locking thread returned error %d\n", ret);
1329 kthread_bind(thread, plr->cpu);
1330 wake_up_process(thread);
1332 ret = wait_event_interruptible(plr->lock_thread_wq,
1333 plr->thread_done == 1);
1336 * If the thread does not get on the CPU for whatever
1337 * reason and the process which sets up the region is
1338 * interrupted then this will leave the thread in runnable
1339 * state and once it gets on the CPU it will dereference
1340 * the cleared, but not freed, plr struct resulting in an
1341 * empty pseudo-locking loop.
1343 rdt_last_cmd_puts("Locking thread interrupted\n");
1347 ret = pseudo_lock_minor_get(&new_minor);
1349 rdt_last_cmd_puts("Unable to obtain a new minor number\n");
1354 * Unlock access but do not release the reference. The
1355 * pseudo-locked region will still be here on return.
1357 * The mutex has to be released temporarily to avoid a potential
1358 * deadlock with the mm->mmap_lock which is obtained in the
1359 * device_create() and debugfs_create_dir() callpath below as well as
1360 * before the mmap() callback is called.
1362 mutex_unlock(&rdtgroup_mutex);
1364 if (!IS_ERR_OR_NULL(debugfs_resctrl)) {
1365 plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name,
1367 if (!IS_ERR_OR_NULL(plr->debugfs_dir))
1368 debugfs_create_file("pseudo_lock_measure", 0200,
1369 plr->debugfs_dir, rdtgrp,
1370 &pseudo_measure_fops);
1373 dev = device_create(&pseudo_lock_class, NULL,
1374 MKDEV(pseudo_lock_major, new_minor),
1375 rdtgrp, "%s", rdtgrp->kn->name);
1377 mutex_lock(&rdtgroup_mutex);
1381 rdt_last_cmd_printf("Failed to create character device: %d\n",
1386 /* We released the mutex - check if group was removed while we did so */
1387 if (rdtgrp->flags & RDT_DELETED) {
1392 plr->minor = new_minor;
1394 rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED;
1395 closid_free(rdtgrp->closid);
1396 rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444);
1397 rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444);
1403 device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor));
1405 debugfs_remove_recursive(plr->debugfs_dir);
1406 pseudo_lock_minor_release(new_minor);
1408 pseudo_lock_cstates_relax(plr);
1410 pseudo_lock_region_clear(plr);
1416 * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region
1417 * @rdtgrp: resource group to which the pseudo-locked region belongs
1419 * The removal of a pseudo-locked region can be initiated when the resource
1420 * group is removed via a "rmdir" from userspace or the
1421 * unmount of the resctrl filesystem. On removal the resource group does
1422 * not go back to pseudo-locksetup mode before it is removed, instead it is
1423 * removed directly. There is thus asymmetry with the creation where the
1424 * &struct pseudo_lock_region is removed here while it was not created in
1425 * rdtgroup_pseudo_lock_create().
1429 void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp)
1431 struct pseudo_lock_region *plr = rdtgrp->plr;
1433 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
1435 * Default group cannot be a pseudo-locked region so we can
1438 closid_free(rdtgrp->closid);
1442 pseudo_lock_cstates_relax(plr);
1443 debugfs_remove_recursive(rdtgrp->plr->debugfs_dir);
1444 device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor));
1445 pseudo_lock_minor_release(plr->minor);
1448 pseudo_lock_free(rdtgrp);
1451 static int pseudo_lock_dev_open(struct inode *inode, struct file *filp)
1453 struct rdtgroup *rdtgrp;
1455 mutex_lock(&rdtgroup_mutex);
1457 rdtgrp = region_find_by_minor(iminor(inode));
1459 mutex_unlock(&rdtgroup_mutex);
1463 filp->private_data = rdtgrp;
1464 atomic_inc(&rdtgrp->waitcount);
1465 /* Perform a non-seekable open - llseek is not supported */
1466 filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
1468 mutex_unlock(&rdtgroup_mutex);
1473 static int pseudo_lock_dev_release(struct inode *inode, struct file *filp)
1475 struct rdtgroup *rdtgrp;
1477 mutex_lock(&rdtgroup_mutex);
1478 rdtgrp = filp->private_data;
1481 mutex_unlock(&rdtgroup_mutex);
1484 filp->private_data = NULL;
1485 atomic_dec(&rdtgrp->waitcount);
1486 mutex_unlock(&rdtgroup_mutex);
1490 static int pseudo_lock_dev_mremap(struct vm_area_struct *area)
1496 static const struct vm_operations_struct pseudo_mmap_ops = {
1497 .mremap = pseudo_lock_dev_mremap,
1500 static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
1502 unsigned long vsize = vma->vm_end - vma->vm_start;
1503 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
1504 struct pseudo_lock_region *plr;
1505 struct rdtgroup *rdtgrp;
1506 unsigned long physical;
1507 unsigned long psize;
1509 mutex_lock(&rdtgroup_mutex);
1511 rdtgrp = filp->private_data;
1514 mutex_unlock(&rdtgroup_mutex);
1521 mutex_unlock(&rdtgroup_mutex);
1526 * Task is required to run with affinity to the cpus associated
1527 * with the pseudo-locked region. If this is not the case the task
1528 * may be scheduled elsewhere and invalidate entries in the
1529 * pseudo-locked region.
1531 if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) {
1532 mutex_unlock(&rdtgroup_mutex);
1536 physical = __pa(plr->kmem) >> PAGE_SHIFT;
1537 psize = plr->size - off;
1539 if (off > plr->size) {
1540 mutex_unlock(&rdtgroup_mutex);
1545 * Ensure changes are carried directly to the memory being mapped;
1546 * do not allow a copy-on-write mapping.
1548 if (!(vma->vm_flags & VM_SHARED)) {
1549 mutex_unlock(&rdtgroup_mutex);
1553 if (vsize > psize) {
1554 mutex_unlock(&rdtgroup_mutex);
1558 memset(plr->kmem + off, 0, vsize);
1560 if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff,
1561 vsize, vma->vm_page_prot)) {
1562 mutex_unlock(&rdtgroup_mutex);
1565 vma->vm_ops = &pseudo_mmap_ops;
1566 mutex_unlock(&rdtgroup_mutex);
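/*
 * A minimal userspace sketch of using the device mapped above (names and
 * sizes are illustrative): the task first restricts its affinity to the CPUs
 * of the pseudo-locked region, then maps the locked memory with a shared
 * mapping:
 *
 *   cpu_set_t cpus;  // populated with the CPUs of the region's cache domain
 *   sched_setaffinity(0, sizeof(cpus), &cpus);
 *   int fd = open("/dev/pseudo_lock/newlock", O_RDWR);
 *   void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * A private (copy-on-write) mapping, or a caller whose affinity is not a
 * subset of plr->d->cpu_mask, is rejected by the checks above.
 */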
1570 static const struct file_operations pseudo_lock_dev_fops = {
1571 .owner = THIS_MODULE,
1572 .llseek = no_llseek,
1575 .open = pseudo_lock_dev_open,
1576 .release = pseudo_lock_dev_release,
1577 .mmap = pseudo_lock_dev_mmap,
1580 int rdt_pseudo_lock_init(void)
1584 ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops);
1588 pseudo_lock_major = ret;
1590 ret = class_register(&pseudo_lock_class);
1592 unregister_chrdev(pseudo_lock_major, "pseudo_lock");
1599 void rdt_pseudo_lock_release(void)
1601 class_unregister(&pseudo_lock_class);
1602 unregister_chrdev(pseudo_lock_major, "pseudo_lock");
1603 pseudo_lock_major = 0;